From f0b87e28fb9bb322c00f09b7649ed5fbc9211699 Mon Sep 17 00:00:00 2001 From: SDKAuto Date: Tue, 30 Apr 2024 04:13:28 +0000 Subject: [PATCH] CodeGen from PR 27576 in Azure/azure-rest-api-specs Merge 482ba279cb21fbf7f4e6bbd4ad14b6c14aa979d7 into b8af2fc3af08e42d2df6f48d1c84070ef1f6589b --- sdk/face/azure-ai-vision-face/CHANGELOG.md | 5 + sdk/face/azure-ai-vision-face/LICENSE | 21 + sdk/face/azure-ai-vision-face/MANIFEST.in | 8 + sdk/face/azure-ai-vision-face/README.md | 45 + sdk/face/azure-ai-vision-face/_meta.json | 7 + .../azure-ai-vision-face/azure/__init__.py | 1 + .../azure-ai-vision-face/azure/ai/__init__.py | 1 + .../azure/ai/vision/__init__.py | 1 + .../azure/ai/vision/face/__init__.py | 30 + .../azure/ai/vision/face/_client.py | 272 + .../azure/ai/vision/face/_configuration.py | 183 + .../azure/ai/vision/face/_model_base.py | 887 + .../ai/vision/face/_operations/__init__.py | 23 + .../ai/vision/face/_operations/_operations.py | 20437 ++++++++++++++++ .../ai/vision/face/_operations/_patch.py | 20 + .../azure/ai/vision/face/_patch.py | 20 + .../azure/ai/vision/face/_serialization.py | 1998 ++ .../azure/ai/vision/face/_vendor.py | 91 + .../azure/ai/vision/face/_version.py | 9 + .../azure/ai/vision/face/aio/__init__.py | 27 + .../azure/ai/vision/face/aio/_client.py | 284 + .../ai/vision/face/aio/_configuration.py | 189 + .../vision/face/aio/_operations/__init__.py | 23 + .../face/aio/_operations/_operations.py | 18070 ++++++++++++++ .../ai/vision/face/aio/_operations/_patch.py | 20 + .../azure/ai/vision/face/aio/_patch.py | 20 + .../azure/ai/vision/face/aio/_vendor.py | 50 + .../azure/ai/vision/face/models/__init__.py | 167 + .../azure/ai/vision/face/models/_enums.py | 258 + .../azure/ai/vision/face/models/_models.py | 2763 +++ .../azure/ai/vision/face/models/_patch.py | 20 + .../azure/ai/vision/face/py.typed | 1 + .../azure-ai-vision-face/dev_requirements.txt | 3 + .../azure-ai-vision-face/sdk_packaging.toml | 2 + sdk/face/azure-ai-vision-face/setup.py | 72 + 
.../azure-ai-vision-face/tsp-location.yaml | 4 + sdk/face/ci.yml | 34 + 37 files changed, 46066 insertions(+) create mode 100644 sdk/face/azure-ai-vision-face/CHANGELOG.md create mode 100644 sdk/face/azure-ai-vision-face/LICENSE create mode 100644 sdk/face/azure-ai-vision-face/MANIFEST.in create mode 100644 sdk/face/azure-ai-vision-face/README.md create mode 100644 sdk/face/azure-ai-vision-face/_meta.json create mode 100644 sdk/face/azure-ai-vision-face/azure/__init__.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/__init__.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/__init__.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/__init__.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/_client.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/_configuration.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/_model_base.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/_operations/__init__.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/_operations/_operations.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/_operations/_patch.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/_patch.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/_serialization.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/_vendor.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/_version.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/__init__.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_client.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_configuration.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/__init__.py create mode 100644 
sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/_operations.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/_patch.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_patch.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_vendor.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/__init__.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_enums.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_models.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_patch.py create mode 100644 sdk/face/azure-ai-vision-face/azure/ai/vision/face/py.typed create mode 100644 sdk/face/azure-ai-vision-face/dev_requirements.txt create mode 100644 sdk/face/azure-ai-vision-face/sdk_packaging.toml create mode 100644 sdk/face/azure-ai-vision-face/setup.py create mode 100644 sdk/face/azure-ai-vision-face/tsp-location.yaml create mode 100644 sdk/face/ci.yml diff --git a/sdk/face/azure-ai-vision-face/CHANGELOG.md b/sdk/face/azure-ai-vision-face/CHANGELOG.md new file mode 100644 index 000000000000..628743d283a9 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/CHANGELOG.md @@ -0,0 +1,5 @@ +# Release History + +## 1.0.0b1 (1970-01-01) + +- Initial version diff --git a/sdk/face/azure-ai-vision-face/LICENSE b/sdk/face/azure-ai-vision-face/LICENSE new file mode 100644 index 000000000000..63447fd8bbbf --- /dev/null +++ b/sdk/face/azure-ai-vision-face/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) Microsoft Corporation. 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/sdk/face/azure-ai-vision-face/MANIFEST.in b/sdk/face/azure-ai-vision-face/MANIFEST.in new file mode 100644 index 000000000000..a8d93584ffcc --- /dev/null +++ b/sdk/face/azure-ai-vision-face/MANIFEST.in @@ -0,0 +1,8 @@ +include *.md +include LICENSE +include azure/ai/vision/face/py.typed +recursive-include tests *.py +recursive-include samples *.py *.md +include azure/__init__.py +include azure/ai/__init__.py +include azure/ai/vision/__init__.py \ No newline at end of file diff --git a/sdk/face/azure-ai-vision-face/README.md b/sdk/face/azure-ai-vision-face/README.md new file mode 100644 index 000000000000..7ba6d76e8240 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/README.md @@ -0,0 +1,45 @@ + + +# Azure Ai Vision Face client library for Python + + +## Getting started + +### Install the package + +```bash +python -m pip install azure-ai-vision-face +``` + +#### Prequisites + +- Python 3.8 or later is required to use this package. +- You need an [Azure subscription][azure_sub] to use this package. +- An existing Azure Ai Vision Face instance. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, +see the Code of Conduct FAQ or contact opencode@microsoft.com with any +additional questions or comments. 
+ + +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token +[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials +[azure_identity_pip]: https://pypi.org/project/azure-identity/ +[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential +[pip]: https://pypi.org/project/pip/ +[azure_sub]: https://azure.microsoft.com/free/ + diff --git a/sdk/face/azure-ai-vision-face/_meta.json b/sdk/face/azure-ai-vision-face/_meta.json new file mode 100644 index 000000000000..ebf0ddcdb2f0 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/_meta.json @@ -0,0 +1,7 @@ +{ + "commit": "37acfe2967e5e1be1169146ac461eb1875c9476e", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/ai/Face", + "@azure-tools/typespec-python": "0.23.8", + "@autorest/python": "6.13.15" +} \ No newline at end of file diff --git a/sdk/face/azure-ai-vision-face/azure/__init__.py b/sdk/face/azure-ai-vision-face/azure/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/face/azure-ai-vision-face/azure/ai/__init__.py b/sdk/face/azure-ai-vision-face/azure/ai/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/__init__.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- 
/dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/__init__.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/__init__.py new file mode 100644 index 000000000000..4cc609842e93 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/__init__.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._client import FaceClient +from ._client import FaceAdministrationClient +from ._client import FaceSessionClient +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "FaceClient", + "FaceAdministrationClient", + "FaceSessionClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_client.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_client.py new file mode 100644 index 000000000000..5f7d7fc0915c --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_client.py @@ -0,0 +1,272 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, TYPE_CHECKING, Union + +from azure.core import PipelineClient +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse + +from ._configuration import ( + FaceAdministrationClientConfiguration, + FaceClientConfiguration, + FaceSessionClientConfiguration, +) +from ._operations import ( + FaceAdministrationClientOperationsMixin, + FaceClientOperationsMixin, + FaceSessionClientOperationsMixin, +) +from ._serialization import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class FaceClient(FaceClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """FaceClient. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + _endpoint = "{endpoint}/face/{apiVersion}" + self._config = FaceClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "FaceClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) + + +class FaceAdministrationClient( + FaceAdministrationClientOperationsMixin +): # pylint: disable=client-accepts-api-version-keyword + """FaceAdministrationClient. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + _endpoint = "{endpoint}/face/{apiVersion}" + self._config = FaceAdministrationClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "FaceAdministrationClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) + + +class FaceSessionClient(FaceSessionClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """FaceSessionClient. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + _endpoint = "{endpoint}/face/{apiVersion}" + self._config = FaceSessionClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "FaceSessionClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_configuration.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_configuration.py new file mode 100644 index 000000000000..02f03574f8db --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_configuration.py @@ -0,0 +1,183 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING, Union + +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies + +from ._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class FaceClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for FaceClient. 
+ + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "v1.1-preview.1") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-vision-face/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Ocp-Apim-Subscription-Key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or 
policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) + + +class FaceAdministrationClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for FaceAdministrationClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "v1.1-preview.1") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-vision-face/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Ocp-Apim-Subscription-Key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + 
self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) + + +class FaceSessionClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for FaceSessionClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "v1.1-preview.1") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-vision-face/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Ocp-Apim-Subscription-Key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + 
def _timedelta_as_isostr(td: timedelta) -> str:
    """Render a :class:`datetime.timedelta` as an ISO 8601 duration string,
    e.g. ``'P4DT12H30M05S'``.

    Function adapted from the Tin Can Python project:
    https://github.com/RusticiSoftware/TinCanPython

    :param timedelta td: The timedelta to convert
    :rtype: str
    :return: ISO8601 version of this timedelta
    """
    # Decompose the total seconds into day/hour/minute/second buckets.
    remainder = td.total_seconds()
    minutes, secs = divmod(remainder, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)

    days, hours, minutes = int(days), int(hours), int(minutes)
    secs = round(secs, 6)

    date_part = "%sD" % days if days else ""

    # No time component at all: emit just the (possibly empty) date portion.
    if not (hours or minutes or secs):
        return "P" + date_part

    time_part = "T"

    # A field is emitted whenever any larger unit was emitted, so the
    # resulting duration never has a "gap" (e.g. hours present, minutes absent).
    include = bool(date_part or hours)
    if include:
        time_part += "{:02}H".format(hours)

    include = include or bool(minutes)
    if include:
        time_part += "{:02}M".format(minutes)

    # Whole seconds print as a zero-padded integer; fractional seconds keep up
    # to six decimals with trailing zeros stripped.
    if isinstance(secs, float) and secs.is_integer():
        secs_text = "{:02}".format(int(secs))
    elif isinstance(secs, float):
        secs_text = ("%09.6f" % secs).rstrip("0")
    else:  # pragma: no cover - total_seconds() always yields a float
        secs_text = "{:02}".format(secs)
    time_part += "{}S".format(secs_text)

    return "P" + date_part + time_part
class SdkJSONEncoder(JSONEncoder):
    """A JSON encoder that's capable of serializing datetime objects and bytes.

    Also understands SDK ``Model`` mappings (optionally stripping read-only
    properties), ``decimal.Decimal``, the azure-core ``_Null`` sentinel, and
    ``timedelta`` durations.
    """

    def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs):
        super().__init__(*args, **kwargs)
        # When True, properties whose visibility is ["read"] are dropped.
        self.exclude_readonly = exclude_readonly
        # Bytes/datetime wire format, e.g. "base64url" or "rfc7231".
        self.format = format

    def default(self, o):  # pylint: disable=too-many-return-statements
        # SDK models serialize as their underlying wire-format dict.
        if _is_model(o):
            if self.exclude_readonly:
                readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)]
                return {k: v for k, v in o.items() if k not in readonly_props}
            return dict(o.items())
        try:
            # Let the base encoder handle anything it natively supports.
            return super(SdkJSONEncoder, self).default(o)
        except TypeError:
            if isinstance(o, _Null):
                return None
            if isinstance(o, decimal.Decimal):
                return float(o)
            if isinstance(o, (bytes, bytearray)):
                return _serialize_bytes(o, self.format)
            try:
                # First try datetime.datetime
                return _serialize_datetime(o, self.format)
            except AttributeError:
                pass
            # Last, try datetime.timedelta
            try:
                return _timedelta_as_isostr(o)
            except AttributeError:
                # This will be raised when it hits value.total_seconds in the method above
                pass
            # Nothing matched: re-raise the base encoder's TypeError.
            return super(SdkJSONEncoder, self).default(o)
def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime:
    """Deserialize a Unix (POSIX) timestamp into an aware UTC datetime.

    :param str attr: response value to be deserialized.
    :rtype: ~datetime.datetime
    :returns: The datetime object from that input
    """
    # A datetime means the value was already deserialized upstream.
    if isinstance(attr, datetime):
        return attr
    return datetime.fromtimestamp(attr, timezone.utc)
def _deserialize_bytes_base64(attr):
    """Decode a base64url-encoded string into bytes.

    Values that are already ``bytes``/``bytearray`` pass through untouched.
    """
    if isinstance(attr, (bytes, bytearray)):
        return attr
    # Restore the '=' padding that the base64url form strips, then map the
    # URL-safe alphabet (-, _) back to the standard one (+, /) before decoding.
    pad_len = 3 - (len(attr) + 3) % 4  # type: ignore
    padded = attr + "=" * pad_len  # type: ignore
    standard = padded.replace("-", "+").replace("_", "/")
    return bytes(base64.b64decode(standard))
def _get_type_alias_type(module_name: str, alias_name: str):
    """Resolve *alias_name* against the typing aliases defined in *module_name*.

    Returns the alias object when one with that name exists in the module,
    otherwise returns the name string unchanged.
    """
    module_dict = sys.modules[module_name].__dict__
    aliases = {
        name: value
        for name, value in module_dict.items()
        if isinstance(value, typing._GenericAlias)  # type: ignore
    }
    return aliases.get(alias_name, alias_name)
def _is_model(obj: typing.Any) -> bool:
    """Return the object's ``_is_model`` marker, or ``False`` when it has none.

    SDK ``Model`` subclasses set ``_is_model = True`` at class level; any other
    object is treated as a plain value.
    """
    try:
        return obj._is_model
    except AttributeError:
        return False
def _get_rest_field(
    attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str
) -> typing.Optional["_RestField"]:
    """Look up the rest field whose wire name equals *rest_name*.

    :param attr_to_rest_field: mapping of attribute name to its rest field.
    :param rest_name: wire-format (JSON) property name to search for.
    :returns: the first matching field, or ``None`` when no field matches.
    """
    for field in attr_to_rest_field.values():
        if field._rest_name == rest_name:
            return field
    return None
    def copy(self) -> "Model":
        # NOTE(review): this constructs a base ``Model`` (not ``type(self)``)
        # from ``self.__dict__``, which for this mapping type is ``{"_data": ...}``
        # rather than the wire-format data itself — so the copy wraps the dict one
        # level too deep and loses the subclass. Looks suspicious; confirm the
        # intended semantics against the code generator before relying on copy().
        return Model(self.__dict__)
    def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]:
        """Return a dict that can be JSONify using json.dump.

        :keyword bool exclude_readonly: Whether to remove the readonly properties.
        :returns: A dict JSON compatible object
        :rtype: dict
        """

        result = {}
        if exclude_readonly:
            # Wire names of properties whose visibility is ["read"].
            readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)]
        for k, v in self.items():
            if exclude_readonly and k in readonly_props:  # pyright: ignore
                continue
            # Multipart file inputs (streams) must be passed through untouched;
            # recursing into them would consume or corrupt the payload.
            is_multipart_file_input = False
            try:
                is_multipart_file_input = next(
                    rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k
                )._is_multipart_file_input
            except StopIteration:
                pass
            result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly)
        return result

    @staticmethod
    def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any:
        # Recursively convert a single value for as_dict(): the null sentinel
        # becomes None, containers recurse element-wise, nested models delegate
        # to their own as_dict(), everything else passes through unchanged.
        if v is None or isinstance(v, _Null):
            return None
        if isinstance(v, (list, tuple, set)):
            return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v)
        if isinstance(v, dict):
            return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()}
        return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v
def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]:
    """Stable-sort union member types so primitives come last.

    When attempting deserializers for a Union in order, str/float/int/bool
    accept almost anything, so they must be tried after the more specific
    candidate types. Relative order within each group is preserved.
    """

    def _is_primitive_name(candidate: typing.Any) -> bool:
        # Mirrors the original check: only objects exposing __name__ can match.
        if not hasattr(candidate, "__name__"):
            return False
        return candidate.__name__.lower() in ("str", "float", "int", "bool")

    return sorted(types, key=_is_primitive_name)
+ if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? + if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional? + try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) + except AttributeError: + pass + + # is it union? 
+ if getattr(annotation, "__origin__", None) is typing.Union: + # initial ordering is we make `string` the last deserialization option, because it is often them most generic + deserializers = [ + _get_deserialize_callable_from_annotation(arg, module, rf) + for arg in _sorted_annotations(annotation.__args__) # pyright: ignore + ] + + return functools.partial(_deserialize_with_union, deserializers) + + try: + if annotation._name == "Dict": # pyright: ignore + value_deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[1], module, rf # pyright: ignore + ) + + return functools.partial( + _deserialize_dict, + value_deserializer, + module, + ) + except (AttributeError, IndexError): + pass + try: + if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore + if len(annotation.__args__) > 1: # pyright: ignore + + entry_deserializers = [ + _get_deserialize_callable_from_annotation(dt, module, rf) + for dt in annotation.__args__ # pyright: ignore + ] + return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module) + deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[0], module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_sequence, deserializer, module) + except (TypeError, IndexError, AttributeError, SyntaxError): + pass + + def _deserialize_default( + deserializer, + obj, + ): + if obj is None: + return obj + try: + return _deserialize_with_callable(deserializer, obj) + except Exception: + pass + return obj + + if get_deserializer(annotation, rf): + return functools.partial(_deserialize_default, get_deserializer(annotation, rf)) + + return functools.partial(_deserialize_default, annotation) + + +def _deserialize_with_callable( + deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], + value: typing.Any, +): + try: + if value is None or isinstance(value, _Null): + return None + if deserializer is None: + return value + if 
def _deserialize(
    deserializer: typing.Any,
    value: typing.Any,
    module: typing.Optional[str] = None,
    rf: typing.Optional["_RestField"] = None,
    format: typing.Optional[str] = None,
) -> typing.Any:
    """Deserialize *value* according to *deserializer* (a type annotation,
    a callable, or an already-resolved ``functools.partial``).

    :param deserializer: annotation or callable describing the target type.
    :param value: raw wire value, or a PipelineResponse to unwrap first.
    :param module: module name used to resolve string/forward-ref annotations.
    :param rf: rest field carrying wire-format metadata, if any.
    :param format: wire format (e.g. "base64url"); used when *rf* is absent.
    """
    # Unwrap a pipeline response to its JSON payload before deserializing.
    if isinstance(value, PipelineResponse):
        value = value.http_response.json()
    # A bare format string is carried via a synthetic rest field.
    if rf is None and format:
        rf = _RestField(format=format)
    # functools.partial deserializers were already resolved by a previous pass.
    if not isinstance(deserializer, functools.partial):
        deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf)
    return _deserialize_with_callable(deserializer, value)
def rest_field(
    *,
    name: typing.Optional[str] = None,
    type: typing.Optional[typing.Callable] = None,  # pylint: disable=redefined-builtin
    visibility: typing.Optional[typing.List[str]] = None,
    default: typing.Any = _UNSET,
    format: typing.Optional[str] = None,
    is_multipart_file_input: bool = False,
) -> typing.Any:
    """Declare a model attribute backed by a REST wire property.

    :keyword name: wire (JSON) name when it differs from the attribute name.
    :keyword type: deserialization callable; inferred from the annotation when omitted.
    :keyword visibility: access flags, e.g. ``["read"]`` marks the field read-only.
    :keyword default: default value placed in the wire data at construction.
    :keyword format: wire format such as "base64url" or "rfc7231".
    :keyword is_multipart_file_input: True for multipart file/stream payloads.
    """
    return _RestField(
        name=name,
        type=type,
        visibility=visibility,
        default=default,
        format=format,
        is_multipart_file_input=is_multipart_file_input,
    )


def rest_discriminator(
    *,
    name: typing.Optional[str] = None,
    type: typing.Optional[typing.Callable] = None,  # pylint: disable=redefined-builtin
) -> typing.Any:
    """Declare the discriminator property of a polymorphic model.

    :keyword name: wire (JSON) name of the discriminator property.
    :keyword type: deserialization callable for the discriminator value.
    """
    return _RestField(name=name, type=type, is_discriminator=True)
b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_operations/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operations import FaceClientOperationsMixin +from ._operations import FaceAdministrationClientOperationsMixin +from ._operations import FaceSessionClientOperationsMixin + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "FaceClientOperationsMixin", + "FaceAdministrationClientOperationsMixin", + "FaceSessionClientOperationsMixin", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_operations/_operations.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_operations/_operations.py new file mode 100644 index 000000000000..1a5deace3dbf --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_operations/_operations.py @@ -0,0 +1,20437 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
def build_face_detect_from_url_request(
    *,
    detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None,
    recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None,
    return_face_id: Optional[bool] = None,
    return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None,
    return_face_landmarks: Optional[bool] = None,
    return_recognition_model: Optional[bool] = None,
    face_id_time_to_live: Optional[int] = None,
    **kwargs: Any,
) -> HttpRequest:
    """Build the ``POST /detect`` HttpRequest for face detection from an image URL.

    Only query options that are explicitly provided (not ``None``) are added to
    the query string; the request body is supplied by the caller via ``kwargs``.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detect"

    # Construct parameters
    if detection_model is not None:
        _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str")
    if recognition_model is not None:
        _params["recognitionModel"] = _SERIALIZER.query("recognition_model", recognition_model, "str")
    if return_face_id is not None:
        _params["returnFaceId"] = _SERIALIZER.query("return_face_id", return_face_id, "bool")
    if return_face_attributes is not None:
        # List query parameter, serialized comma-separated.
        _params["returnFaceAttributes"] = _SERIALIZER.query(
            "return_face_attributes", return_face_attributes, "[str]", div=","
        )
    if return_face_landmarks is not None:
        _params["returnFaceLandmarks"] = _SERIALIZER.query("return_face_landmarks", return_face_landmarks, "bool")
    if return_recognition_model is not None:
        _params["returnRecognitionModel"] = _SERIALIZER.query(
            "return_recognition_model", return_recognition_model, "bool"
        )
    if face_id_time_to_live is not None:
        _params["faceIdTimeToLive"] = _SERIALIZER.query("face_id_time_to_live", face_id_time_to_live, "int")

    # Construct headers
    if content_type is not None:
        _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
None, + return_face_landmarks: Optional[bool] = None, + return_recognition_model: Optional[bool] = None, + face_id_time_to_live: Optional[int] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/detect" + + # Construct parameters + if detection_model is not None: + _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str") + if recognition_model is not None: + _params["recognitionModel"] = _SERIALIZER.query("recognition_model", recognition_model, "str") + if return_face_id is not None: + _params["returnFaceId"] = _SERIALIZER.query("return_face_id", return_face_id, "bool") + if return_face_attributes is not None: + _params["returnFaceAttributes"] = _SERIALIZER.query( + "return_face_attributes", return_face_attributes, "[str]", div="," + ) + if return_face_landmarks is not None: + _params["returnFaceLandmarks"] = _SERIALIZER.query("return_face_landmarks", return_face_landmarks, "bool") + if return_recognition_model is not None: + _params["returnRecognitionModel"] = _SERIALIZER.query( + "return_recognition_model", return_recognition_model, "bool" + ) + if face_id_time_to_live is not None: + _params["faceIdTimeToLive"] = _SERIALIZER.query("face_id_time_to_live", face_id_time_to_live, "int") + + # Construct headers + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_find_similar_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", 
None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/findsimilars" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_find_similar_from_face_list_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/findsimilars" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_find_similar_from_large_face_list_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/findsimilars" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_identify_from_person_group_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", 
_headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/identify" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_identify_from_large_person_group_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/identify" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_identify_from_person_directory_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/identify" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_identify_from_dynamic_person_group_request( # pylint: disable=name-too-long + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = 
kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/identify" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_verify_face_to_face_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/verify" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_verify_from_person_group_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/verify" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_verify_from_large_person_group_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", 
None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/verify" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_verify_from_person_directory_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/verify" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_group_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/group" + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_create_face_list_request( # pylint: disable=name-too-long + face_list_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", 
"application/json") + + # Construct URL + _url = "/facelists/{faceListId}" + path_format_arguments = { + "faceListId": _SERIALIZER.url("face_list_id", face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_delete_face_list_request( # pylint: disable=name-too-long + face_list_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/facelists/{faceListId}" + path_format_arguments = { + "faceListId": _SERIALIZER.url("face_list_id", face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_face_list_request( # pylint: disable=name-too-long + face_list_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/facelists/{faceListId}" + path_format_arguments = { + "faceListId": _SERIALIZER.url("face_list_id", face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if return_recognition_model is not None: + _params["returnRecognitionModel"] = _SERIALIZER.query( + "return_recognition_model", return_recognition_model, "bool" + ) + 
+ # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_update_face_list_request( # pylint: disable=name-too-long + face_list_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/facelists/{faceListId}" + path_format_arguments = { + "faceListId": _SERIALIZER.url("face_list_id", face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_face_lists_request( # pylint: disable=name-too-long + *, return_recognition_model: Optional[bool] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/facelists" + + # Construct parameters + if return_recognition_model is not None: + _params["returnRecognitionModel"] = _SERIALIZER.query( + "return_recognition_model", return_recognition_model, "bool" + ) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_add_face_list_face_from_url_request( # pylint: disable=name-too-long + face_list_id: str, + *, + target_face: 
Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/facelists/{faceListId}/persistedfaces" + path_format_arguments = { + "faceListId": _SERIALIZER.url("face_list_id", face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if target_face is not None: + _params["targetFace"] = _SERIALIZER.query("target_face", target_face, "[int]", div=",") + if detection_model is not None: + _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str") + if user_data is not None: + _params["userData"] = _SERIALIZER.query("user_data", user_data, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_add_face_list_face_request( # pylint: disable=name-too-long + face_list_id: str, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/facelists/{faceListId}/persistedfaces" + 
path_format_arguments = { + "faceListId": _SERIALIZER.url("face_list_id", face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if target_face is not None: + _params["targetFace"] = _SERIALIZER.query("target_face", target_face, "[int]", div=",") + if detection_model is not None: + _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str") + if user_data is not None: + _params["userData"] = _SERIALIZER.query("user_data", user_data, "str") + + # Construct headers + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_delete_face_list_face_request( # pylint: disable=name-too-long + face_list_id: str, persisted_face_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/facelists/{faceListId}/persistedfaces/{persistedFaceId}" + path_format_arguments = { + "faceListId": _SERIALIZER.url("face_list_id", face_list_id, "str"), + "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_create_large_face_list_request( # pylint: disable=name-too-long + large_face_list_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") 
+ + # Construct URL + _url = "/largefacelists/{largeFaceListId}" + path_format_arguments = { + "largeFaceListId": _SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_delete_large_face_list_request( # pylint: disable=name-too-long + large_face_list_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}" + path_format_arguments = { + "largeFaceListId": _SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_large_face_list_request( # pylint: disable=name-too-long + large_face_list_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}" + path_format_arguments = { + "largeFaceListId": _SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if return_recognition_model is not None: + _params["returnRecognitionModel"] = 
_SERIALIZER.query( + "return_recognition_model", return_recognition_model, "bool" + ) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_update_large_face_list_request( # pylint: disable=name-too-long + large_face_list_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}" + path_format_arguments = { + "largeFaceListId": _SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_large_face_lists_request( # pylint: disable=name-too-long + *, + start: Optional[str] = None, + top: Optional[int] = None, + return_recognition_model: Optional[bool] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists" + + # Construct parameters + if start is not None: + _params["start"] = _SERIALIZER.query("start", start, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if return_recognition_model is not None: + _params["returnRecognitionModel"] = _SERIALIZER.query( + "return_recognition_model", 
return_recognition_model, "bool" + ) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_get_large_face_list_training_status_request( # pylint: disable=name-too-long + large_face_list_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}/training" + path_format_arguments = { + "largeFaceListId": _SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_train_large_face_list_request( # pylint: disable=name-too-long + large_face_list_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}/train" + path_format_arguments = { + "largeFaceListId": _SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_add_large_face_list_face_from_url_request( # pylint: disable=name-too-long + large_face_list_id: str, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}/persistedfaces" + path_format_arguments = { + "largeFaceListId": _SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if target_face is not None: + _params["targetFace"] = _SERIALIZER.query("target_face", target_face, "[int]", div=",") + if detection_model is not None: + _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str") + if user_data is not None: + _params["userData"] = _SERIALIZER.query("user_data", user_data, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_add_large_face_list_face_request( # pylint: disable=name-too-long + large_face_list_id: str, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}/persistedfaces" + path_format_arguments = { + "largeFaceListId": _SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + } + + _url: str = 
_url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if target_face is not None: + _params["targetFace"] = _SERIALIZER.query("target_face", target_face, "[int]", div=",") + if detection_model is not None: + _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str") + if user_data is not None: + _params["userData"] = _SERIALIZER.query("user_data", user_data, "str") + + # Construct headers + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_delete_large_face_list_face_request( # pylint: disable=name-too-long + large_face_list_id: str, persisted_face_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}/persistedfaces/{persistedFaceId}" + path_format_arguments = { + "largeFaceListId": _SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_large_face_list_face_request( # pylint: disable=name-too-long + large_face_list_id: str, persisted_face_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}/persistedfaces/{persistedFaceId}" + path_format_arguments = { + "largeFaceListId": 
_SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_update_large_face_list_face_request( # pylint: disable=name-too-long + large_face_list_id: str, persisted_face_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}/persistedfaces/{persistedFaceId}" + path_format_arguments = { + "largeFaceListId": _SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_large_face_list_faces_request( # pylint: disable=name-too-long + large_face_list_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largefacelists/{largeFaceListId}/persistedfaces" + path_format_arguments = { + "largeFaceListId": 
_SERIALIZER.url("large_face_list_id", large_face_list_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if start is not None: + _params["start"] = _SERIALIZER.query("start", start, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_create_person_group_request( # pylint: disable=name-too-long + person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_delete_person_group_request( # pylint: disable=name-too-long + person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", 
accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_person_group_request( # pylint: disable=name-too-long + person_group_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if return_recognition_model is not None: + _params["returnRecognitionModel"] = _SERIALIZER.query( + "return_recognition_model", return_recognition_model, "bool" + ) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_update_person_group_request( # pylint: disable=name-too-long + person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs) + 
+ +def build_face_administration_get_person_groups_request( # pylint: disable=name-too-long + *, + start: Optional[str] = None, + top: Optional[int] = None, + return_recognition_model: Optional[bool] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups" + + # Construct parameters + if start is not None: + _params["start"] = _SERIALIZER.query("start", start, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if return_recognition_model is not None: + _params["returnRecognitionModel"] = _SERIALIZER.query( + "return_recognition_model", return_recognition_model, "bool" + ) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_get_person_group_training_status_request( # pylint: disable=name-too-long + person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/training" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_train_person_group_request( # pylint: disable=name-too-long + person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") 
+ + # Construct URL + _url = "/persongroups/{personGroupId}/train" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_create_person_group_person_request( # pylint: disable=name-too-long + person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/persons" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_delete_person_group_person_request( # pylint: disable=name-too-long + person_group_id: str, person_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/persons/{personId}" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_person_group_person_request( # pylint: disable=name-too-long + person_group_id: str, person_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/persons/{personId}" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_update_person_group_person_request( # pylint: disable=name-too-long + person_group_id: str, person_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/persons/{personId}" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_person_group_persons_request( # pylint: 
disable=name-too-long + person_group_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/persons" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if start is not None: + _params["start"] = _SERIALIZER.query("start", start, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_add_person_group_person_face_from_url_request( # pylint: disable=name-too-long + person_group_id: str, + person_id: str, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/persons/{personId}/persistedfaces" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if target_face is not None: + 
_params["targetFace"] = _SERIALIZER.query("target_face", target_face, "[int]", div=",") + if detection_model is not None: + _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str") + if user_data is not None: + _params["userData"] = _SERIALIZER.query("user_data", user_data, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_add_person_group_person_face_request( # pylint: disable=name-too-long + person_group_id: str, + person_id: str, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/persons/{personId}/persistedfaces" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if target_face is not None: + _params["targetFace"] = _SERIALIZER.query("target_face", target_face, "[int]", div=",") + if detection_model is not None: + _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str") + if user_data is not None: + _params["userData"] = _SERIALIZER.query("user_data", user_data, "str") + + # Construct headers + _headers["content-type"] = _SERIALIZER.header("content_type", 
content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_delete_person_group_person_face_request( # pylint: disable=name-too-long + person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_person_group_person_face_request( # pylint: disable=name-too-long + person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return 
HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_update_person_group_person_face_request( # pylint: disable=name-too-long + person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/persongroups/{personGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}" + path_format_arguments = { + "personGroupId": _SERIALIZER.url("person_group_id", person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_create_large_person_group_request( # pylint: disable=name-too-long + large_person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, 
"str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_delete_large_person_group_request( # pylint: disable=name-too-long + large_person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_large_person_group_request( # pylint: disable=name-too-long + large_person_group_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if return_recognition_model is not None: + _params["returnRecognitionModel"] = _SERIALIZER.query( + "return_recognition_model", return_recognition_model, "bool" + ) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_update_large_person_group_request( # 
pylint: disable=name-too-long + large_person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_large_person_groups_request( # pylint: disable=name-too-long + *, + start: Optional[str] = None, + top: Optional[int] = None, + return_recognition_model: Optional[bool] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups" + + # Construct parameters + if start is not None: + _params["start"] = _SERIALIZER.query("start", start, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if return_recognition_model is not None: + _params["returnRecognitionModel"] = _SERIALIZER.query( + "return_recognition_model", return_recognition_model, "bool" + ) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_get_large_person_group_training_status_request( # pylint: 
disable=name-too-long + large_person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}/training" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_train_large_person_group_request( # pylint: disable=name-too-long + large_person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}/train" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_create_large_person_group_person_request( # pylint: disable=name-too-long + large_person_group_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}/persons" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + } + + _url: str = 
_url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_delete_large_person_group_person_request( # pylint: disable=name-too-long + large_person_group_id: str, person_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}/persons/{personId}" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_large_person_group_person_request( # pylint: disable=name-too-long + large_person_group_id: str, person_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}/persons/{personId}" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def 
build_face_administration_update_large_person_group_person_request( # pylint: disable=name-too-long + large_person_group_id: str, person_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}/persons/{personId}" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs) + + +def build_face_administration_get_large_person_group_persons_request( # pylint: disable=name-too-long + large_person_group_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}/persons" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if start is not None: + _params["start"] = _SERIALIZER.query("start", start, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, 
"str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_add_large_person_group_person_face_from_url_request( # pylint: disable=name-too-long + large_person_group_id: str, + person_id: str, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if target_face is not None: + _params["targetFace"] = _SERIALIZER.query("target_face", target_face, "[int]", div=",") + if detection_model is not None: + _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str") + if user_data is not None: + _params["userData"] = _SERIALIZER.query("user_data", user_data, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_add_large_person_group_person_face_request( # pylint: disable=name-too-long + large_person_group_id: str, + person_id: str, + *, + target_face: Optional[List[int]] = None, + 
detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + "personId": _SERIALIZER.url("person_id", person_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + if target_face is not None: + _params["targetFace"] = _SERIALIZER.query("target_face", target_face, "[int]", div=",") + if detection_model is not None: + _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str") + if user_data is not None: + _params["userData"] = _SERIALIZER.query("user_data", user_data, "str") + + # Construct headers + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_face_administration_delete_large_person_group_person_face_request( # pylint: disable=name-too-long + large_person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}" + path_format_arguments = { + "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"), + "personId": 
_SERIALIZER.url("person_id", person_id, "str"),
        "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs)


def build_face_administration_get_large_person_group_person_face_request(  # pylint: disable=name-too-long
    large_person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}"
    path_format_arguments = {
        "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"),
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
        "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)


def build_face_administration_update_large_person_group_person_face_request(  # pylint: disable=name-too-long
    large_person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a PATCH request for /largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    # Content-Type is optional here: the body serialization layer may supply it instead.
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}"
    path_format_arguments = {
        "largePersonGroupId": _SERIALIZER.url("large_person_group_id", large_person_group_id, "str"),
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
        "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs)


def build_face_administration_create_person_request(**kwargs: Any) -> HttpRequest:  # pylint: disable=name-too-long
    """Build a POST request for /persons."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons"

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)


def build_face_administration_delete_person_request(  # pylint: disable=name-too-long
    person_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a DELETE request for /persons/{personId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons/{personId}"
    path_format_arguments = {
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs)


def build_face_administration_get_person_request(  # pylint: disable=name-too-long
    person_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /persons/{personId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons/{personId}"
    path_format_arguments = {
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)


def build_face_administration_update_person_request(  # pylint: disable=name-too-long
    person_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a PATCH request for /persons/{personId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons/{personId}"
    path_format_arguments = {
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs)


def build_face_administration_get_persons_request(  # pylint: disable=name-too-long
    *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /persons with optional start/top paging query parameters."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons"

    # Construct parameters
    if start is not None:
        _params["start"] = _SERIALIZER.query("start", start, "str")
    if top is not None:
        _params["top"] = _SERIALIZER.query("top", top, "int")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_face_administration_get_dynamic_person_group_references_request(  # pylint: disable=name-too-long
    person_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /persons/{personId}/dynamicPersonGroupReferences with optional paging."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons/{personId}/dynamicPersonGroupReferences"
    path_format_arguments = {
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    if start is not None:
        _params["start"] = _SERIALIZER.query("start", start, "str")
    if top is not None:
        _params["top"] = _SERIALIZER.query("top", top, "int")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_face_administration_add_person_face_from_url_request(  # pylint: disable=name-too-long
    person_id: str,
    recognition_model: Union[str, _models.FaceRecognitionModel],
    *,
    target_face: Optional[List[int]] = None,
    detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None,
    user_data: Optional[str] = None,
    **kwargs: Any,
) -> HttpRequest:
    """Build a POST request for /persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces (JSON/URL body variant)."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces"
    path_format_arguments = {
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
        "recognitionModel": _SERIALIZER.url("recognition_model", recognition_model, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    if target_face is not None:
        # targetFace is sent as a comma-separated list of ints.
        _params["targetFace"] = _SERIALIZER.query("target_face", target_face, "[int]", div=",")
    if detection_model is not None:
        _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str")
    if user_data is not None:
        _params["userData"] = _SERIALIZER.query("user_data", user_data, "str")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)


def build_face_administration_add_person_face_request(  # pylint: disable=name-too-long
    person_id: str,
    recognition_model: Union[str, _models.FaceRecognitionModel],
    *,
    target_face: Optional[List[int]] = None,
    detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None,
    user_data: Optional[str] = None,
    **kwargs: Any,
) -> HttpRequest:
    """Build a POST request for /persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces (binary image body variant)."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # NOTE(review): unlike every sibling builder, content_type is popped WITHOUT a default
    # (raises KeyError if the caller omits it) and the header key is written in lowercase
    # "content-type". HTTP field names are case-insensitive, so this is harmless on the
    # wire, but it is inconsistent with the other builders — presumably deliberate codegen
    # output for a required binary content type; confirm against the generator before changing.
    content_type: str = kwargs.pop("content_type")
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces"
    path_format_arguments = {
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
        "recognitionModel": _SERIALIZER.url("recognition_model", recognition_model, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    if target_face is not None:
        _params["targetFace"] = _SERIALIZER.query("target_face", target_face, "[int]", div=",")
    if detection_model is not None:
        _params["detectionModel"] = _SERIALIZER.query("detection_model", detection_model, "str")
    if user_data is not None:
        _params["userData"] = _SERIALIZER.query("user_data", user_data, "str")

    # Construct headers
    _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)


def build_face_administration_delete_person_face_request(  # pylint: disable=name-too-long
    person_id: str, recognition_model: Union[str, _models.FaceRecognitionModel], persisted_face_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a DELETE request for /persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces/{persistedFaceId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces/{persistedFaceId}"
    path_format_arguments = {
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
        "recognitionModel": _SERIALIZER.url("recognition_model", recognition_model, "str"),
        "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs)


def build_face_administration_get_person_face_request(  # pylint: disable=name-too-long
    person_id: str, recognition_model: Union[str, _models.FaceRecognitionModel], persisted_face_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces/{persistedFaceId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces/{persistedFaceId}"
    path_format_arguments = {
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
        "recognitionModel": _SERIALIZER.url("recognition_model", recognition_model, "str"),
        "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)


def build_face_administration_update_person_face_request(  # pylint: disable=name-too-long
    person_id: str, recognition_model: Union[str, _models.FaceRecognitionModel], persisted_face_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a PATCH request for /persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces/{persistedFaceId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces/{persistedFaceId}"
    path_format_arguments = {
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
        "recognitionModel": _SERIALIZER.url("recognition_model", recognition_model, "str"),
        "persistedFaceId": _SERIALIZER.url("persisted_face_id", persisted_face_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs)


def build_face_administration_get_person_faces_request(  # pylint: disable=name-too-long
    person_id: str, recognition_model: Union[str, _models.FaceRecognitionModel], **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces"
    path_format_arguments = {
        "personId": _SERIALIZER.url("person_id", person_id, "str"),
        "recognitionModel": _SERIALIZER.url("recognition_model", recognition_model, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)


def build_face_administration_create_dynamic_person_group_with_person_request(  # pylint: disable=name-too-long
    dynamic_person_group_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a PUT request for /dynamicpersongroups/{dynamicPersonGroupId} (create-with-person body variant)."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/dynamicpersongroups/{dynamicPersonGroupId}"
    path_format_arguments = {
        "dynamicPersonGroupId": _SERIALIZER.url("dynamic_person_group_id", dynamic_person_group_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs)


def build_face_administration_create_dynamic_person_group_request(  # pylint: disable=name-too-long
    dynamic_person_group_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a PUT request for /dynamicpersongroups/{dynamicPersonGroupId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/dynamicpersongroups/{dynamicPersonGroupId}"
    path_format_arguments = {
        "dynamicPersonGroupId": _SERIALIZER.url("dynamic_person_group_id", dynamic_person_group_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs)


def build_face_administration_delete_dynamic_person_group_request(  # pylint: disable=name-too-long
    dynamic_person_group_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a DELETE request for /dynamicpersongroups/{dynamicPersonGroupId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/dynamicpersongroups/{dynamicPersonGroupId}"
    path_format_arguments = {
        "dynamicPersonGroupId": _SERIALIZER.url("dynamic_person_group_id", dynamic_person_group_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs)


def build_face_administration_get_dynamic_person_group_request(  # pylint: disable=name-too-long
    dynamic_person_group_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /dynamicpersongroups/{dynamicPersonGroupId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/dynamicpersongroups/{dynamicPersonGroupId}"
    path_format_arguments = {
        "dynamicPersonGroupId": _SERIALIZER.url("dynamic_person_group_id", dynamic_person_group_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)


def build_face_administration_update_dynamic_person_group_with_person_changes_request(  # pylint: disable=name-too-long
    dynamic_person_group_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a PATCH request for /dynamicpersongroups/{dynamicPersonGroupId} (update-with-person-changes body variant)."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/dynamicpersongroups/{dynamicPersonGroupId}"
    path_format_arguments = {
        "dynamicPersonGroupId": _SERIALIZER.url("dynamic_person_group_id", dynamic_person_group_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs)


def build_face_administration_update_dynamic_person_group_request(  # pylint: disable=name-too-long
    dynamic_person_group_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a PATCH request for /dynamicpersongroups/{dynamicPersonGroupId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/dynamicpersongroups/{dynamicPersonGroupId}"
    path_format_arguments = {
        "dynamicPersonGroupId": _SERIALIZER.url("dynamic_person_group_id", dynamic_person_group_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=_url, headers=_headers, **kwargs)


def build_face_administration_get_dynamic_person_groups_request(  # pylint: disable=name-too-long
    *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /dynamicpersongroups with optional start/top paging query parameters."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/dynamicpersongroups"

    # Construct parameters
    if start is not None:
        _params["start"] = _SERIALIZER.query("start", start, "str")
    if top is not None:
        _params["top"] = _SERIALIZER.query("top", top, "int")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_face_administration_get_dynamic_person_group_persons_request(  # pylint: disable=name-too-long
    dynamic_person_group_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /dynamicpersongroups/{dynamicPersonGroupId}/persons with optional paging."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/dynamicpersongroups/{dynamicPersonGroupId}/persons"
    path_format_arguments = {
        "dynamicPersonGroupId": _SERIALIZER.url("dynamic_person_group_id", dynamic_person_group_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    if start is not None:
        _params["start"] = _SERIALIZER.query("start", start, "str")
    if top is not None:
        _params["top"] = _SERIALIZER.query("top", top, "int")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_face_session_create_liveness_session_request(**kwargs: Any) -> HttpRequest:  # pylint: disable=name-too-long
    """Build a POST request for /detectLiveness/singleModal/sessions."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLiveness/singleModal/sessions"

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)


def build_face_session_delete_liveness_session_request(  # pylint: disable=name-too-long
    session_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a DELETE request for /detectLiveness/singleModal/sessions/{sessionId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLiveness/singleModal/sessions/{sessionId}"
    path_format_arguments = {
        "sessionId": _SERIALIZER.url("session_id", session_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs)


def build_face_session_get_liveness_session_result_request(  # pylint: disable=name-too-long
    session_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /detectLiveness/singleModal/sessions/{sessionId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLiveness/singleModal/sessions/{sessionId}"
    path_format_arguments = {
        "sessionId": _SERIALIZER.url("session_id", session_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)


def build_face_session_get_liveness_sessions_request(  # pylint: disable=name-too-long
    *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /detectLiveness/singleModal/sessions with optional paging."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLiveness/singleModal/sessions"

    # Construct parameters
    if start is not None:
        _params["start"] = _SERIALIZER.query("start", start, "str")
    if top is not None:
        _params["top"] = _SERIALIZER.query("top", top, "int")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_face_session_get_liveness_session_audit_entries_request(  # pylint: disable=name-too-long
    session_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /detectLiveness/singleModal/sessions/{sessionId}/audit with optional paging."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLiveness/singleModal/sessions/{sessionId}/audit"
    path_format_arguments = {
        "sessionId": _SERIALIZER.url("session_id", session_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    if start is not None:
        _params["start"] = _SERIALIZER.query("start", start, "str")
    if top is not None:
        _params["top"] = _SERIALIZER.query("top", top, "int")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_face_session_create_liveness_with_verify_session_request(  # pylint: disable=name-too-long
    **kwargs: Any,
) -> HttpRequest:
    """Build a POST request for /detectLivenessWithVerify/singleModal/sessions."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLivenessWithVerify/singleModal/sessions"

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)


def build_face_session_create_liveness_with_verify_session_with_verify_image_request(  # pylint: disable=name-too-long
    **kwargs: Any,
) -> HttpRequest:
    """Build a POST request for /detectLivenessWithVerify/singleModal/sessions (verify-image variant; no Content-Type handling here)."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLivenessWithVerify/singleModal/sessions"

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)


def build_face_session_delete_liveness_with_verify_session_request(  # pylint: disable=name-too-long
    session_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a DELETE request for /detectLivenessWithVerify/singleModal/sessions/{sessionId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLivenessWithVerify/singleModal/sessions/{sessionId}"
    path_format_arguments = {
        "sessionId": _SERIALIZER.url("session_id", session_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs)


def build_face_session_get_liveness_with_verify_session_result_request(  # pylint: disable=name-too-long
    session_id: str, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /detectLivenessWithVerify/singleModal/sessions/{sessionId}."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLivenessWithVerify/singleModal/sessions/{sessionId}"
    path_format_arguments = {
        "sessionId": _SERIALIZER.url("session_id", session_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)


def build_face_session_get_liveness_with_verify_sessions_request(  # pylint: disable=name-too-long
    *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /detectLivenessWithVerify/singleModal/sessions with optional paging."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLivenessWithVerify/singleModal/sessions"

    # Construct parameters
    if start is not None:
        _params["start"] = _SERIALIZER.query("start", start, "str")
    if top is not None:
        _params["top"] = _SERIALIZER.query("top", top, "int")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_face_session_get_liveness_with_verify_session_audit_entries_request(  # pylint: disable=name-too-long
    session_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
) -> HttpRequest:
    """Build a GET request for /detectLivenessWithVerify/singleModal/sessions/{sessionId}/audit with optional paging."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/detectLivenessWithVerify/singleModal/sessions/{sessionId}/audit"
    path_format_arguments = {
        "sessionId": _SERIALIZER.url("session_id", session_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    if start is not None:
        _params["start"] = _SERIALIZER.query("start", start, "str")
    if top is not None:
        _params["top"] = _SERIALIZER.query("top", top, "int")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


class FaceClientOperationsMixin(FaceClientMixinABC):

    @overload
    def _detect_from_url(
        self,
        body: JSON,
        *,
        content_type: str = "application/json",
        detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None,
        recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None,
        return_face_id: Optional[bool] = None,
        return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None,
        return_face_landmarks: Optional[bool] = None,
        return_recognition_model: Optional[bool] = None,
        face_id_time_to_live: Optional[int] = None,
        **kwargs: Any,
    ) -> List[_models.FaceDetectionResult]: ...
    @overload
    def _detect_from_url(
        self,
        *,
        url: str,
        content_type: str = "application/json",
        detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None,
        recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None,
        return_face_id: Optional[bool] = None,
        return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None,
        return_face_landmarks: Optional[bool] = None,
        return_recognition_model: Optional[bool] = None,
        face_id_time_to_live: Optional[int] = None,
        **kwargs: Any,
    ) -> List[_models.FaceDetectionResult]: ...
+ @overload + def _detect_from_url( + self, + body: IO[bytes], + *, + content_type: str = "application/json", + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + return_face_id: Optional[bool] = None, + return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None, + return_face_landmarks: Optional[bool] = None, + return_recognition_model: Optional[bool] = None, + face_id_time_to_live: Optional[int] = None, + **kwargs: Any, + ) -> List[_models.FaceDetectionResult]: ... + + @distributed_trace + def _detect_from_url( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + return_face_id: Optional[bool] = None, + return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None, + return_face_landmarks: Optional[bool] = None, + return_recognition_model: Optional[bool] = None, + face_id_time_to_live: Optional[int] = None, + **kwargs: Any, + ) -> List[_models.FaceDetectionResult]: + # pylint: disable=line-too-long + """Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, + and attributes. + + .. + + [!IMPORTANT] + To mitigate potential misuse that can subject people to stereotyping, discrimination, or + unfair denial of services, we are retiring Face API attributes that predict emotion, gender, + age, smile, facial hair, hair, and makeup. Read more about this decision + https://azure.microsoft.com/en-us/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/. + + + * + + + * No image will be stored. Only the extracted face feature(s) will be stored on server. The + faceId is an identifier of the face feature and will be used in "Identify", "Verify", and "Find + Similar". 
The stored face features will expire and be deleted at the time specified by + faceIdTimeToLive after the original detection call. + * Optional parameters include faceId, landmarks, and attributes. Attributes include headPose, + glasses, occlusion, accessories, blur, exposure, noise, mask, and qualityForRecognition. Some + of the results returned for specific attributes may not be highly accurate. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from + large to small. + * For optimal results when querying "Identify", "Verify", and "Find Similar" ('returnFaceId' is + true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels + (100 pixels between eyes). + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + * 'detection_02': Face attributes and landmarks are disabled if you choose this detection + model. + * 'detection_03': Face attributes (mask and headPose only) and landmarks are supported if you + choose this detection model. + + * Different 'recognitionModel' values are provided. If follow-up operations like "Verify", + "Identify", "Find Similar" are needed, please specify the recognition model with + 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if + latest model needed, please explicitly specify the model you need in this parameter. Once + specified, the detected faceIds will be associated with the specified recognition model. 
More + details, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-recognition-model. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword recognition_model: The 'recognitionModel' associated with the detected faceIds. + Supported 'recognitionModel' values include 'recognition_01', 'recognition_02', + 'recognition_03' or 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' + is recommended since its accuracy is improved on faces wearing masks compared with + 'recognition_03', and its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", "recognition_03", and + "recognition_04". Default value is None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :keyword return_face_id: Return faceIds of the detected faces or not. The default value is + true. Default value is None. + :paramtype return_face_id: bool + :keyword return_face_attributes: Analyze and return the one or more specified face attributes + in the comma-separated string like 'returnFaceAttributes=headPose,glasses'. Face attribute + analysis has additional computational and time cost. Default value is None. + :paramtype return_face_attributes: list[str or ~azure.ai.vision.face.models.FaceAttributeType] + :keyword return_face_landmarks: Return face landmarks of the detected faces or not. 
The default + value is false. Default value is None. + :paramtype return_face_landmarks: bool + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. This is only applicable when returnFaceId = true. Default value is None. + :paramtype return_recognition_model: bool + :keyword face_id_time_to_live: The number of seconds for the face ID being cached. Supported + range from 60 seconds up to 86400 seconds. The default value is 86400 (24 hours). Default value + is None. + :paramtype face_id_time_to_live: int + :return: list of FaceDetectionResult + :rtype: list[~azure.ai.vision.face.models.FaceDetectionResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == [ + { + "faceRectangle": { + "height": 0, # The height of the rectangle, in pixels. + Required. + "left": 0, # The distance from the left edge if the image to + the left edge of the rectangle, in pixels. Required. + "top": 0, # The distance from the top edge if the image to + the top edge of the rectangle, in pixels. Required. + "width": 0 # The width of the rectangle, in pixels. + Required. + }, + "faceAttributes": { + "accessories": [ + { + "confidence": 0.0, # Confidence level of the + accessory type. Range between [0,1]. Required. + "type": "str" # Type of the accessory. + Required. Known values are: "headwear", "glasses", and "mask". + } + ], + "age": 0.0, # Optional. Age in years. + "blur": { + "blurLevel": "str", # An enum value indicating level + of blurriness. Required. Known values are: "low", "medium", and + "high". + "value": 0.0 # A number indicating level of + blurriness ranging from 0 to 1. Required. + }, + "exposure": { + "exposureLevel": "str", # An enum value indicating + level of exposure. Required. 
Known values are: "underExposure", + "goodExposure", and "overExposure". + "value": 0.0 # A number indicating level of exposure + level ranging from 0 to 1. [0, 0.25) is under exposure. [0.25, 0.75) + is good exposure. [0.75, 1] is over exposure. Required. + }, + "facialHair": { + "beard": 0.0, # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + "moustache": 0.0, # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + "sideburns": 0.0 # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + }, + "glasses": "str", # Optional. Glasses type if any of the + face. Known values are: "noGlasses", "readingGlasses", "sunglasses", and + "swimmingGoggles". + "hair": { + "bald": 0.0, # A number describing confidence level + of whether the person is bald. Required. + "hairColor": [ + { + "color": "str", # Name of the hair + color. Required. Known values are: "unknown", "white", + "gray", "blond", "brown", "red", "black", and "other". + "confidence": 0.0 # Confidence level + of the color. Range between [0,1]. Required. + } + ], + "invisible": bool # A boolean value describing + whether the hair is visible in the image. Required. + }, + "headPose": { + "pitch": 0.0, # Value of angles. Required. + "roll": 0.0, # Value of angles. Required. + "yaw": 0.0 # Value of angles. Required. + }, + "mask": { + "noseAndMouthCovered": bool, # A boolean value + indicating whether nose and mouth are covered. Required. + "type": "str" # Type of the mask. Required. Known + values are: "faceMask", "noMask", "otherMaskOrOcclusion", and + "uncertain". + }, + "noise": { + "noiseLevel": "str", # An enum value indicating + level of noise. Required. Known values are: "low", "medium", and + "high". + "value": 0.0 # A number indicating level of noise + level ranging from 0 to 1. [0, 0.25) is under exposure. [0.25, 0.75) + is good exposure. 
[0.75, 1] is over exposure. [0, 0.3) is low noise + level. [0.3, 0.7) is medium noise level. [0.7, 1] is high noise + level. Required. + }, + "occlusion": { + "eyeOccluded": bool, # A boolean value indicating + whether eyes are occluded. Required. + "foreheadOccluded": bool, # A boolean value + indicating whether forehead is occluded. Required. + "mouthOccluded": bool # A boolean value indicating + whether the mouth is occluded. Required. + }, + "qualityForRecognition": "str", # Optional. Properties + describing the overall image quality regarding whether the image being + used in the detection is of sufficient quality to attempt face + recognition on. Known values are: "low", "medium", and "high". + "smile": 0.0 # Optional. Smile intensity, a number between + [0,1]. + }, + "faceId": "str", # Optional. Unique faceId of the detected face, + created by detection API and it will expire 24 hours after the detection + call. To return this, it requires 'returnFaceId' parameter to be true. + "faceLandmarks": { + "eyeLeftBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. 
+ "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowLeftInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowLeftOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowRightInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowRightOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "mouthLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "mouthRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseLeftAlarOutTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseLeftAlarTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRightAlarOutTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRightAlarTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRootLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRootRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. 
+ "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "pupilLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "pupilRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "underLipBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "underLipTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "upperLipBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "upperLipTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + } + }, + "recognitionModel": "str" # Optional. The 'recognitionModel' + associated with this faceId. This is only returned when + 'returnRecognitionModel' is explicitly set as true. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + cls: ClsType[List[_models.FaceDetectionResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_detect_from_url_request( + detection_model=detection_model, + recognition_model=recognition_model, + return_face_id=return_face_id, + return_face_attributes=return_face_attributes, + return_face_landmarks=return_face_landmarks, + return_recognition_model=return_recognition_model, + face_id_time_to_live=face_id_time_to_live, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + 
response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceDetectionResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def _detect( + self, + image_content: bytes, + *, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + return_face_id: Optional[bool] = None, + return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None, + return_face_landmarks: Optional[bool] = None, + return_recognition_model: Optional[bool] = None, + face_id_time_to_live: Optional[int] = None, + **kwargs: Any, + ) -> List[_models.FaceDetectionResult]: + # pylint: disable=line-too-long + """Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, + and attributes. + + .. + + [!IMPORTANT] + To mitigate potential misuse that can subject people to stereotyping, discrimination, or + unfair denial of services, we are retiring Face API attributes that predict emotion, gender, + age, smile, facial hair, hair, and makeup. Read more about this decision + https://azure.microsoft.com/en-us/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/. + + + * + + + * No image will be stored. Only the extracted face feature(s) will be stored on server. The + faceId is an identifier of the face feature and will be used in "Identify", "Verify", and "Find + Similar". The stored face features will expire and be deleted at the time specified by + faceIdTimeToLive after the original detection call. 
+ * Optional parameters include faceId, landmarks, and attributes. Attributes include headPose, + glasses, occlusion, accessories, blur, exposure, noise, mask, and qualityForRecognition. Some + of the results returned for specific attributes may not be highly accurate. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from + large to small. + * For optimal results when querying "Identify", "Verify", and "Find Similar" ('returnFaceId' is + true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels + (100 pixels between eyes). + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + * 'detection_02': Face attributes and landmarks are disabled if you choose this detection + model. + * 'detection_03': Face attributes (mask and headPose only) and landmarks are supported if you + choose this detection model. + + * Different 'recognitionModel' values are provided. If follow-up operations like "Verify", + "Identify", "Find Similar" are needed, please specify the recognition model with + 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if + latest model needed, please explicitly specify the model you need in this parameter. Once + specified, the detected faceIds will be associated with the specified recognition model. More + details, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-recognition-model. 
+ + :param image_content: The input image binary. Required. + :type image_content: bytes + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword recognition_model: The 'recognitionModel' associated with the detected faceIds. + Supported 'recognitionModel' values include 'recognition_01', 'recognition_02', + 'recognition_03' or 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' + is recommended since its accuracy is improved on faces wearing masks compared with + 'recognition_03', and its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", "recognition_03", and + "recognition_04". Default value is None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :keyword return_face_id: Return faceIds of the detected faces or not. The default value is + true. Default value is None. + :paramtype return_face_id: bool + :keyword return_face_attributes: Analyze and return the one or more specified face attributes + in the comma-separated string like 'returnFaceAttributes=headPose,glasses'. Face attribute + analysis has additional computational and time cost. Default value is None. + :paramtype return_face_attributes: list[str or ~azure.ai.vision.face.models.FaceAttributeType] + :keyword return_face_landmarks: Return face landmarks of the detected faces or not. The default + value is false. Default value is None. + :paramtype return_face_landmarks: bool + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. 
This is only applicable when returnFaceId = true. Default value is None. + :paramtype return_recognition_model: bool + :keyword face_id_time_to_live: The number of seconds for the face ID being cached. Supported + range from 60 seconds up to 86400 seconds. The default value is 86400 (24 hours). Default value + is None. + :paramtype face_id_time_to_live: int + :return: list of FaceDetectionResult + :rtype: list[~azure.ai.vision.face.models.FaceDetectionResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "faceRectangle": { + "height": 0, # The height of the rectangle, in pixels. + Required. + "left": 0, # The distance from the left edge if the image to + the left edge of the rectangle, in pixels. Required. + "top": 0, # The distance from the top edge if the image to + the top edge of the rectangle, in pixels. Required. + "width": 0 # The width of the rectangle, in pixels. + Required. + }, + "faceAttributes": { + "accessories": [ + { + "confidence": 0.0, # Confidence level of the + accessory type. Range between [0,1]. Required. + "type": "str" # Type of the accessory. + Required. Known values are: "headwear", "glasses", and "mask". + } + ], + "age": 0.0, # Optional. Age in years. + "blur": { + "blurLevel": "str", # An enum value indicating level + of blurriness. Required. Known values are: "low", "medium", and + "high". + "value": 0.0 # A number indicating level of + blurriness ranging from 0 to 1. Required. + }, + "exposure": { + "exposureLevel": "str", # An enum value indicating + level of exposure. Required. Known values are: "underExposure", + "goodExposure", and "overExposure". + "value": 0.0 # A number indicating level of exposure + level ranging from 0 to 1. [0, 0.25) is under exposure. [0.25, 0.75) + is good exposure. [0.75, 1] is over exposure. Required. 
+ }, + "facialHair": { + "beard": 0.0, # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + "moustache": 0.0, # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + "sideburns": 0.0 # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + }, + "glasses": "str", # Optional. Glasses type if any of the + face. Known values are: "noGlasses", "readingGlasses", "sunglasses", and + "swimmingGoggles". + "hair": { + "bald": 0.0, # A number describing confidence level + of whether the person is bald. Required. + "hairColor": [ + { + "color": "str", # Name of the hair + color. Required. Known values are: "unknown", "white", + "gray", "blond", "brown", "red", "black", and "other". + "confidence": 0.0 # Confidence level + of the color. Range between [0,1]. Required. + } + ], + "invisible": bool # A boolean value describing + whether the hair is visible in the image. Required. + }, + "headPose": { + "pitch": 0.0, # Value of angles. Required. + "roll": 0.0, # Value of angles. Required. + "yaw": 0.0 # Value of angles. Required. + }, + "mask": { + "noseAndMouthCovered": bool, # A boolean value + indicating whether nose and mouth are covered. Required. + "type": "str" # Type of the mask. Required. Known + values are: "faceMask", "noMask", "otherMaskOrOcclusion", and + "uncertain". + }, + "noise": { + "noiseLevel": "str", # An enum value indicating + level of noise. Required. Known values are: "low", "medium", and + "high". + "value": 0.0 # A number indicating level of noise + level ranging from 0 to 1. [0, 0.25) is under exposure. [0.25, 0.75) + is good exposure. [0.75, 1] is over exposure. [0, 0.3) is low noise + level. [0.3, 0.7) is medium noise level. [0.7, 1] is high noise + level. Required. + }, + "occlusion": { + "eyeOccluded": bool, # A boolean value indicating + whether eyes are occluded. Required. 
+ "foreheadOccluded": bool, # A boolean value + indicating whether forehead is occluded. Required. + "mouthOccluded": bool # A boolean value indicating + whether the mouth is occluded. Required. + }, + "qualityForRecognition": "str", # Optional. Properties + describing the overall image quality regarding whether the image being + used in the detection is of sufficient quality to attempt face + recognition on. Known values are: "low", "medium", and "high". + "smile": 0.0 # Optional. Smile intensity, a number between + [0,1]. + }, + "faceId": "str", # Optional. Unique faceId of the detected face, + created by detection API and it will expire 24 hours after the detection + call. To return this, it requires 'returnFaceId' parameter to be true. + "faceLandmarks": { + "eyeLeftBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. 
+ }, + "eyebrowLeftInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowLeftOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowRightInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowRightOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "mouthLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "mouthRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseLeftAlarOutTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseLeftAlarTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRightAlarOutTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRightAlarTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRootLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRootRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. 
+ }, + "pupilLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "pupilRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "underLipBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "underLipTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "upperLipBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "upperLipTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + } + }, + "recognitionModel": "str" # Optional. The 'recognitionModel' + associated with this faceId. This is only returned when + 'returnRecognitionModel' is explicitly set as true. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[List[_models.FaceDetectionResult]] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_detect_request( + detection_model=detection_model, + recognition_model=recognition_model, + return_face_id=return_face_id, + return_face_attributes=return_face_attributes, + return_face_landmarks=return_face_landmarks, + return_recognition_model=return_recognition_model, + face_id_time_to_live=face_id_time_to_live, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(List[_models.FaceDetectionResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def find_similar( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a faceId array. A faceId + array contains the faces created by Detect. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target faceId array. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. 
Required. + "faceIds": [ + "str" # An array of candidate faceIds. All of them are created by + "Detect" and the faceIds will expire 24 hours after the detection call. The + number of faceIds is limited to 1000. Required. + ], + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @overload + def find_similar( + self, + *, + face_id: str, + face_ids: List[str], + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any, + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a faceId array. A faceId + array contains the faces created by Detect. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. 
Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target faceId array. + + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword face_ids: An array of candidate faceIds. All of them are created by "Detect" and the + faceIds will expire 24 hours after the detection call. The number of faceIds is limited to + 1000. Required. + :paramtype face_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. + :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. 
faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @overload + def find_similar( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a faceId array. A faceId + array contains the faces created by Detect. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target faceId array. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @distributed_trace + def find_similar( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_id: str = _Unset, + face_ids: List[str] = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any, + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a faceId array. A faceId + array contains the faces created by Detect. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target faceId array. + + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword face_ids: An array of candidate faceIds. All of them are created by "Detect" and the + faceIds will expire 24 hours after the detection call. The number of faceIds is limited to + 1000. Required. + :paramtype face_ids: list[str] + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. + :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "faceIds": [ + "str" # An array of candidate faceIds. All of them are created by + "Detect" and the faceIds will expire 24 hours after the detection call. The + number of faceIds is limited to 1000. Required. + ], + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. 
Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceFindSimilarResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + body = { + "faceid": face_id, + "faceids": face_ids, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + "mode": mode, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_find_similar_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", 
self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceFindSimilarResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def find_similar_from_face_list( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Face List. A 'faceListId' + is created by Create Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. 
It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Face List. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "faceListId": "str", # An existing user-specified unique candidate Face + List, created in "Create Face List". Face List contains a set of persistedFaceIds + which are persisted and will never expire. Required. + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. 
persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @overload + def find_similar_from_face_list( + self, + *, + face_id: str, + face_list_id: str, + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any, + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Face List. A 'faceListId' + is created by Create Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Face List. + + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword face_list_id: An existing user-specified unique candidate Face List, created in + "Create Face List". Face List contains a set of persistedFaceIds which are persisted and will + never expire. Required. + :paramtype face_list_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. + :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @overload + def find_similar_from_face_list( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Face List. A 'faceListId' + is created by Create Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. 
It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Face List. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @distributed_trace + def find_similar_from_face_list( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_id: str = _Unset, + face_list_id: str = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any, + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Face List. A 'faceListId' + is created by Create Face List. 
+ + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Face List. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword face_list_id: An existing user-specified unique candidate Face List, created in + "Create Face List". Face List contains a set of persistedFaceIds which are persisted and will + never expire. Required. + :paramtype face_list_id: str + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. 
+ :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "faceListId": "str", # An existing user-specified unique candidate Face + List, created in "Create Face List". Face List contains a set of persistedFaceIds + which are persisted and will never expire. Required. + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceFindSimilarResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if face_list_id is _Unset: + raise TypeError("missing required argument: face_list_id") + body = { + "faceid": face_id, + "facelistid": face_list_id, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + "mode": mode, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_find_similar_from_face_list_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceFindSimilarResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def find_similar_from_large_face_list( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Large Face List. A + 'largeFaceListId' is created by Create Large Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Large Face List. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "largeFaceListId": "str", # An existing user-specified unique candidate + Large Face List, created in "Create Large Face List". Large Face List contains a + set of persistedFaceIds which are persisted and will never expire. Required. + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. 
+ } + ] + """ + + @overload + def find_similar_from_large_face_list( + self, + *, + face_id: str, + large_face_list_id: str, + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any, + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Large Face List. A + 'largeFaceListId' is created by Create Large Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Large Face List. + + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword large_face_list_id: An existing user-specified unique candidate Large Face List, + created in "Create Large Face List". Large Face List contains a set of persistedFaceIds which + are persisted and will never expire. Required. + :paramtype large_face_list_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. + :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @overload + def find_similar_from_large_face_list( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Large Face List. A + 'largeFaceListId' is created by Create Large Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. 
It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Large Face List. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @distributed_trace + def find_similar_from_large_face_list( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_id: str = _Unset, + large_face_list_id: str = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any, + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Large Face List. 
A + 'largeFaceListId' is created by Create Large Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Large Face List. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword large_face_list_id: An existing user-specified unique candidate Large Face List, + created in "Create Large Face List". Large Face List contains a set of persistedFaceIds which + are persisted and will never expire. Required. + :paramtype large_face_list_id: str + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. 
+ :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "largeFaceListId": "str", # An existing user-specified unique candidate + Large Face List, created in "Create Large Face List". Large Face List contains a + set of persistedFaceIds which are persisted and will never expire. Required. + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceFindSimilarResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if large_face_list_id is _Unset: + raise TypeError("missing required argument: large_face_list_id") + body = { + "faceid": face_id, + "largefacelistid": large_face_list_id, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + "mode": mode, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_find_similar_from_large_face_list_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in 
memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceFindSimilarResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def identify_from_person_group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Group (given by personGroupId), and return candidate + person(s) for that face ranked by similarity confidence. The Person Group should be trained to + make it ready for identification. See more in "Train Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group. 
+ * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "personGroupId": "str", # personGroupId of the target Person Group, created + by "Create Person Group". Parameter personGroupId and largePersonGroupId should + not be provided at the same time. Required. + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + + @overload + def identify_from_person_group( + self, + *, + face_ids: List[str], + person_group_id: str, + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any, + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Group (given by personGroupId), and return candidate + person(s) for that face ranked by similarity confidence. The Person Group should be trained to + make it ready for identification. See more in "Train Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group. + + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. 
+ :paramtype face_ids: list[str] + :keyword person_group_id: personGroupId of the target Person Group, created by "Create Person + Group". Parameter personGroupId and largePersonGroupId should not be provided at the same time. + Required. + :paramtype person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @overload + def identify_from_person_group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Person Group. 
+ + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Group (given by personGroupId), and return candidate + person(s) for that face ranked by similarity confidence. The Person Group should be trained to + make it ready for identification. See more in "Train Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + + @distributed_trace + def identify_from_person_group( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_ids: List[str] = _Unset, + person_group_id: str = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any, + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Group (given by personGroupId), and return candidate + person(s) for that face ranked by similarity confidence. The Person Group should be trained to + make it ready for identification. See more in "Train Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. 
The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword person_group_id: personGroupId of the target Person Group, created by "Create Person + Group". Parameter personGroupId and largePersonGroupId should not be provided at the same time. + Required. + :paramtype person_group_id: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "personGroupId": "str", # personGroupId of the target Person Group, created + by "Create Person Group". Parameter personGroupId and largePersonGroupId should + not be provided at the same time. Required. + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. 
+ "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceIdentificationResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + if person_group_id is _Unset: + raise TypeError("missing required argument: person_group_id") + body = { + "confidencethreshold": confidence_threshold, + "faceids": face_ids, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + "persongroupid": person_group_id, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_identify_from_person_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + 
"apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceIdentificationResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def identify_from_large_person_group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Large Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Large Person Group (given by largePersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. The Large Person Group + should be trained to make it ready for identification. See more in "Train Large Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. 
+ * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group/Large Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group or Large Person Group. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "largePersonGroupId": "str", # largePersonGroupId of the target Large Person + Group, created by "Create Large Person Group". Parameter personGroupId and + largePersonGroupId should not be provided at the same time. Required. + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. 
The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @overload + def identify_from_large_person_group( + self, + *, + face_ids: List[str], + large_person_group_id: str, + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any, + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Large Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Large Person Group (given by largePersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. The Large Person Group + should be trained to make it ready for identification. See more in "Train Large Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. 
+ * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group/Large Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group or Large Person Group. + + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword large_person_group_id: largePersonGroupId of the target Large Person Group, created by + "Create Large Person Group". Parameter personGroupId and largePersonGroupId should not be + provided at the same time. Required. + :paramtype large_person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. 
+ "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @overload + def identify_from_large_person_group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Large Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Large Person Group (given by largePersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. The Large Person Group + should be trained to make it ready for identification. See more in "Train Large Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group/Large Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group or Large Person Group. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @distributed_trace + def identify_from_large_person_group( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_ids: List[str] = _Unset, + large_person_group_id: str = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any, + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Large Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Large Person Group (given by largePersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. The Large Person Group + should be trained to make it ready for identification. See more in "Train Large Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. 
+ * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group/Large Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group or Large Person Group. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword large_person_group_id: largePersonGroupId of the target Large Person Group, created by + "Create Large Person Group". Parameter personGroupId and largePersonGroupId should not be + provided at the same time. Required. + :paramtype large_person_group_id: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "largePersonGroupId": "str", # largePersonGroupId of the target Large Person + Group, created by "Create Large Person Group". Parameter personGroupId and + largePersonGroupId should not be provided at the same time. Required. + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceIdentificationResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + if large_person_group_id is _Unset: + raise TypeError("missing required argument: large_person_group_id") + body = { + "confidencethreshold": confidence_threshold, + "faceids": face_ids, + "largepersongroupid": large_person_group_id, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_identify_from_large_person_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if 
_stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceIdentificationResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def identify_from_person_directory( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + person directory personIds array. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Directory Persons (given by personIds), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "personIds": [ + "str" # Array of personIds created in Person Directory "Create + Person". The valid number of personIds is between [1,30]. Required. + ], + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + + @overload + def identify_from_person_directory( + self, + *, + face_ids: List[str], + person_ids: List[str], + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any, + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + person directory personIds array. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Directory Persons (given by personIds), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword person_ids: Array of personIds created in Person Directory "Create Person". The valid + number of personIds is between [1,30]. Required. + :paramtype person_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @overload + def identify_from_person_directory( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + person directory personIds array. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Directory Persons (given by personIds), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. 
+ * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @distributed_trace + def identify_from_person_directory( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_ids: List[str] = _Unset, + person_ids: List[str] = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any, + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + person directory personIds array. 
+ + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Directory Persons (given by personIds), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword person_ids: Array of personIds created in Person Directory "Create Person". The valid + number of personIds is between [1,30]. Required. + :paramtype person_ids: list[str] + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. 
Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "personIds": [ + "str" # Array of personIds created in Person Directory "Create + Person". The valid number of personIds is between [1,30]. Required. + ], + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceIdentificationResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + if person_ids is _Unset: + raise TypeError("missing required argument: person_ids") + body = { + "confidencethreshold": confidence_threshold, + "faceids": face_ids, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + "personids": person_ids, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_identify_from_person_directory_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in 
memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceIdentificationResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def identify_from_dynamic_person_group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Dynamic Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Dynamic Person Group (given by dynamicPersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "dynamicPersonGroupId": "str", # DynamicPersonGroupId of the target + PersonDirectory DynamicPersonGroup to match against. Required. + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + + @overload + def identify_from_dynamic_person_group( + self, + *, + face_ids: List[str], + dynamic_person_group_id: str, + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any, + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Dynamic Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Dynamic Person Group (given by dynamicPersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword dynamic_person_group_id: DynamicPersonGroupId of the target PersonDirectory + DynamicPersonGroup to match against. Required. + :paramtype dynamic_person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @overload + def identify_from_dynamic_person_group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Dynamic Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Dynamic Person Group (given by dynamicPersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. 
+ * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @distributed_trace + def identify_from_dynamic_person_group( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_ids: List[str] = _Unset, + dynamic_person_group_id: str = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any, + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Dynamic Person Group. 
+ + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Dynamic Person Group (given by dynamicPersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword dynamic_person_group_id: DynamicPersonGroupId of the target PersonDirectory + DynamicPersonGroup to match against. Required. + :paramtype dynamic_person_group_id: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. 
Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "dynamicPersonGroupId": "str", # DynamicPersonGroupId of the target + PersonDirectory DynamicPersonGroup to match against. Required. + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceIdentificationResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + if dynamic_person_group_id is _Unset: + raise TypeError("missing required argument: dynamic_person_group_id") + body = { + "confidencethreshold": confidence_threshold, + "dynamicpersongroupid": dynamic_person_group_id, + "faceids": face_ids, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_identify_from_dynamic_person_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in 
[200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceIdentificationResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def verify_face_to_face( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether two faces belong to a same person. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the both faces should be the same. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId1": "str", # The faceId of one face, come from "Detect". Required. + "faceId2": "str" # The faceId of another face, come from "Detect". Required. 
+ } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + def verify_face_to_face( + self, *, face_id1: str, face_id2: str, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether two faces belong to a same person. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the both faces should be the same. + + :keyword face_id1: The faceId of one face, come from "Detect". Required. + :paramtype face_id1: str + :keyword face_id2: The faceId of another face, come from "Detect". Required. + :paramtype face_id2: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + def verify_face_to_face( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether two faces belong to a same person. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the both faces should be the same. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. 
+ By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @distributed_trace + def verify_face_to_face( + self, body: Union[JSON, IO[bytes]] = _Unset, *, face_id1: str = _Unset, face_id2: str = _Unset, **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether two faces belong to a same person. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the both faces should be the same. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id1: The faceId of one face, come from "Detect". Required. + :paramtype face_id1: str + :keyword face_id2: The faceId of another face, come from "Detect". Required. + :paramtype face_id2: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId1": "str", # The faceId of one face, come from "Detect". Required. + "faceId2": "str" # The faceId of another face, come from "Detect". Required. 
+ } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FaceVerificationResult] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id1 is _Unset: + raise TypeError("missing required argument: face_id1") + if face_id2 is _Unset: + raise TypeError("missing required argument: face_id2") + body = {"faceid1": face_id1, "faceid2": face_id2} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_verify_face_to_face_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", 
self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceVerificationResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def verify_from_person_group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Person Group. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. 
The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "personGroupId": "str", # Using existing personGroupId and personId for fast + loading a specified person. personGroupId is created in "Create Person Group". + Required. + "personId": "str" # Specify a certain person in Person Group. Required. + } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + def verify_from_person_group( + self, + *, + face_id: str, + person_group_id: str, + person_id: str, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Person Group. 
+ + :keyword face_id: The faceId of the face, come from "Detect". Required. + :paramtype face_id: str + :keyword person_group_id: Using existing personGroupId and personId for fast loading a + specified person. personGroupId is created in "Create Person Group". Required. + :paramtype person_group_id: str + :keyword person_id: Specify a certain person in Person Group. Required. + :paramtype person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + def verify_from_person_group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. 
+ * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Person Group. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @distributed_trace + def verify_from_person_group( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_id: str = _Unset, + person_group_id: str = _Unset, + person_id: str = _Unset, + **kwargs: Any, + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Person Group. 
+ + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id: The faceId of the face, come from "Detect". Required. + :paramtype face_id: str + :keyword person_group_id: Using existing personGroupId and personId for fast loading a + specified person. personGroupId is created in "Create Person Group". Required. + :paramtype person_group_id: str + :keyword person_id: Specify a certain person in Person Group. Required. + :paramtype person_id: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "personGroupId": "str", # Using existing personGroupId and personId for fast + loading a specified person. personGroupId is created in "Create Person Group". + Required. + "personId": "str" # Specify a certain person in Person Group. Required. + } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FaceVerificationResult] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if person_group_id is _Unset: + raise TypeError("missing required argument: person_group_id") + if person_id is _Unset: + raise TypeError("missing required argument: person_id") + body = {"faceid": face_id, "persongroupid": person_group_id, "personid": person_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_verify_from_person_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close 
the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceVerificationResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def verify_from_large_person_group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Large Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Large Person Group. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "largePersonGroupId": "str", # Using existing largePersonGroupId and + personId for fast loading a specified person. largePersonGroupId is created in + "Create Large Person Group". 
    @overload
    def verify_from_large_person_group(
        self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.FaceVerificationResult:
        """Verify whether a face belongs to a person in a Large Person Group.

        The 'recognitionModel' associated with the query face should be the same as the
        'recognitionModel' used by the Large Person Group.

        :param body: JSON request body carrying "faceId", "largePersonGroupId" and "personId".
         Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: FaceVerificationResult. The FaceVerificationResult is compatible with
         MutableMapping.
        :rtype: ~azure.ai.vision.face.models.FaceVerificationResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def verify_from_large_person_group(
        self,
        *,
        face_id: str,
        large_person_group_id: str,
        person_id: str,
        content_type: str = "application/json",
        **kwargs: Any,
    ) -> _models.FaceVerificationResult:
        """Verify whether a face belongs to a person in a Large Person Group.

        The 'recognitionModel' associated with the query face should be the same as the
        'recognitionModel' used by the Large Person Group.

        :keyword face_id: The faceId of the face, come from "Detect". Required.
        :paramtype face_id: str
        :keyword large_person_group_id: Using existing largePersonGroupId and personId for fast
         loading a specified person. largePersonGroupId is created in "Create Large Person
         Group". Required.
        :paramtype large_person_group_id: str
        :keyword person_id: Specify a certain person in Large Person Group. Required.
        :paramtype person_id: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: FaceVerificationResult. The FaceVerificationResult is compatible with
         MutableMapping.
        :rtype: ~azure.ai.vision.face.models.FaceVerificationResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def verify_from_large_person_group(
        self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.FaceVerificationResult:
        """Verify whether a face belongs to a person in a Large Person Group.

        The 'recognitionModel' associated with the query face should be the same as the
        'recognitionModel' used by the Large Person Group.

        :param body: Pre-serialized request body. Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary
         body. Default value is "application/json".
        :paramtype content_type: str
        :return: FaceVerificationResult. The FaceVerificationResult is compatible with
         MutableMapping.
        :rtype: ~azure.ai.vision.face.models.FaceVerificationResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
Required. + :paramtype large_person_group_id: str + :keyword person_id: Specify a certain person in Large Person Group. Required. + :paramtype person_id: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "largePersonGroupId": "str", # Using existing largePersonGroupId and + personId for fast loading a specified person. largePersonGroupId is created in + "Create Large Person Group". Required. + "personId": "str" # Specify a certain person in Large Person Group. + Required. + } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FaceVerificationResult] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if large_person_group_id is _Unset: + raise TypeError("missing required argument: large_person_group_id") + if person_id is _Unset: + raise TypeError("missing required argument: person_id") + body = {"faceid": face_id, "largepersongroupid": large_person_group_id, "personid": person_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_verify_from_large_person_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load 
the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceVerificationResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def verify_from_person_directory( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in Person Directory. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The Verify operation can only match faces obtained with the same recognition model, that + is associated with the query face. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "personId": "str" # Specify a certain person in PersonDirectory Person. + Required. 
+ } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + def verify_from_person_directory( + self, *, face_id: str, person_id: str, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in Person Directory. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The Verify operation can only match faces obtained with the same recognition model, that + is associated with the query face. + + :keyword face_id: The faceId of the face, come from "Detect". Required. + :paramtype face_id: str + :keyword person_id: Specify a certain person in PersonDirectory Person. Required. + :paramtype person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + def verify_from_person_directory( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in Person Directory. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The Verify operation can only match faces obtained with the same recognition model, that + is associated with the query face. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. 
+ By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @distributed_trace + def verify_from_person_directory( + self, body: Union[JSON, IO[bytes]] = _Unset, *, face_id: str = _Unset, person_id: str = _Unset, **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in Person Directory. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The Verify operation can only match faces obtained with the same recognition model, that + is associated with the query face. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id: The faceId of the face, come from "Detect". Required. + :paramtype face_id: str + :keyword person_id: Specify a certain person in PersonDirectory Person. Required. + :paramtype person_id: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "personId": "str" # Specify a certain person in PersonDirectory Person. + Required. 
+ } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FaceVerificationResult] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if person_id is _Unset: + raise TypeError("missing required argument: person_id") + body = {"faceid": face_id, "personid": person_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_verify_from_person_directory_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", 
self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceVerificationResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def group(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.FaceGroupingResult: + # pylint: disable=line-too-long + """Divide candidate faces into groups based on face similarity. + + > + * + + + * The output is one or more disjointed face groups and a messyGroup. A face group contains + faces that have similar looking, often of the same person. Face groups are ranked by group + size, i.e. number of faces. Notice that faces belonging to a same person might be split into + several groups in the result. + * MessyGroup is a special face group containing faces that cannot find any similar counterpart + face from original faces. The messyGroup will not appear in the result if all faces found their + counterparts. + * Group API needs at least 2 candidate faces and 1000 at most. We suggest to try "Verify Face + To Face" when you only have 2 candidate faces. + * The 'recognitionModel' associated with the query faces' faceIds should be the same. + + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceGroupingResult. The FaceGroupingResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceGroupingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of candidate faceIds created by "Detect". The maximum + is 1000 faces. Required. + ] + } + + # response body for status code(s): 200 + response == { + "groups": [ + [ + "str" # A partition of the original faces based on face + similarity. Groups are ranked by number of faces. Required. + ] + ], + "messyGroup": [ + "str" # Face ids array of faces that cannot find any similar faces + from original faces. Required. + ] + } + """ + + @overload + def group( + self, *, face_ids: List[str], content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceGroupingResult: + # pylint: disable=line-too-long + """Divide candidate faces into groups based on face similarity. + + > + * + + + * The output is one or more disjointed face groups and a messyGroup. A face group contains + faces that have similar looking, often of the same person. Face groups are ranked by group + size, i.e. number of faces. Notice that faces belonging to a same person might be split into + several groups in the result. + * MessyGroup is a special face group containing faces that cannot find any similar counterpart + face from original faces. The messyGroup will not appear in the result if all faces found their + counterparts. + * Group API needs at least 2 candidate faces and 1000 at most. We suggest to try "Verify Face + To Face" when you only have 2 candidate faces. + * The 'recognitionModel' associated with the query faces' faceIds should be the same. 
+ + :keyword face_ids: Array of candidate faceIds created by "Detect". The maximum is 1000 faces. + Required. + :paramtype face_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceGroupingResult. The FaceGroupingResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceGroupingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "groups": [ + [ + "str" # A partition of the original faces based on face + similarity. Groups are ranked by number of faces. Required. + ] + ], + "messyGroup": [ + "str" # Face ids array of faces that cannot find any similar faces + from original faces. Required. + ] + } + """ + + @overload + def group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceGroupingResult: + # pylint: disable=line-too-long + """Divide candidate faces into groups based on face similarity. + + > + * + + + * The output is one or more disjointed face groups and a messyGroup. A face group contains + faces that have similar looking, often of the same person. Face groups are ranked by group + size, i.e. number of faces. Notice that faces belonging to a same person might be split into + several groups in the result. + * MessyGroup is a special face group containing faces that cannot find any similar counterpart + face from original faces. The messyGroup will not appear in the result if all faces found their + counterparts. + * Group API needs at least 2 candidate faces and 1000 at most. We suggest to try "Verify Face + To Face" when you only have 2 candidate faces. + * The 'recognitionModel' associated with the query faces' faceIds should be the same. + + :param body: Required. 
+ :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceGroupingResult. The FaceGroupingResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceGroupingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "groups": [ + [ + "str" # A partition of the original faces based on face + similarity. Groups are ranked by number of faces. Required. + ] + ], + "messyGroup": [ + "str" # Face ids array of faces that cannot find any similar faces + from original faces. Required. + ] + } + """ + + @distributed_trace + def group( + self, body: Union[JSON, IO[bytes]] = _Unset, *, face_ids: List[str] = _Unset, **kwargs: Any + ) -> _models.FaceGroupingResult: + # pylint: disable=line-too-long + """Divide candidate faces into groups based on face similarity. + + > + * + + + * The output is one or more disjointed face groups and a messyGroup. A face group contains + faces that have similar looking, often of the same person. Face groups are ranked by group + size, i.e. number of faces. Notice that faces belonging to a same person might be split into + several groups in the result. + * MessyGroup is a special face group containing faces that cannot find any similar counterpart + face from original faces. The messyGroup will not appear in the result if all faces found their + counterparts. + * Group API needs at least 2 candidate faces and 1000 at most. We suggest to try "Verify Face + To Face" when you only have 2 candidate faces. + * The 'recognitionModel' associated with the query faces' faceIds should be the same. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_ids: Array of candidate faceIds created by "Detect". The maximum is 1000 faces. 
+ Required. + :paramtype face_ids: list[str] + :return: FaceGroupingResult. The FaceGroupingResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceGroupingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of candidate faceIds created by "Detect". The maximum + is 1000 faces. Required. + ] + } + + # response body for status code(s): 200 + response == { + "groups": [ + [ + "str" # A partition of the original faces based on face + similarity. Groups are ranked by number of faces. Required. + ] + ], + "messyGroup": [ + "str" # Face ids array of faces that cannot find any similar faces + from original faces. Required. + ] + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FaceGroupingResult] = kwargs.pop("cls", None) + + if body is _Unset: + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + body = {"faceids": face_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", 
self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceGroupingResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class FaceAdministrationClientOperationsMixin( # pylint: disable=too-many-public-methods + FaceAdministrationClientMixinABC +): + + @overload + def create_face_list( # pylint: disable=inconsistent-return-statements + self, face_list_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create an empty Face List with user-specified faceListId, name, an optional userData and + recognitionModel. + + Up to 64 Face Lists are allowed in one subscription. + + Face List is a list of faces, up to 1,000 faces, and used by "Find Similar From Face List". + + After creation, user should use "Add Face List Face" to import the faces. No image will be + stored. Only the extracted face feature(s) will be stored on server until "Delete Face List" is + called. 

        "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering,
        or as a light way face identification. But if the actual use is to identify person, please use
        Person Group / Large Person Group and "Identify".

        Please consider Large Face List when the face number is large. It can support up to 1,000,000
        faces.

        :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum
         length is 64. Required.
        :type face_list_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                body = {
                    "name": "str",  # User defined name, maximum length is 128. Required.
                    "recognitionModel": "str",  # Optional. The 'recognitionModel' associated
                      with this face list. Supported 'recognitionModel' values include
                      'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The
                      default value is 'recognition_01'. 'recognition_04' is recommended since its
                      accuracy is improved on faces wearing masks compared with 'recognition_03', and
                      its overall accuracy is improved compared with 'recognition_01' and
                      'recognition_02'. Known values are: "recognition_01", "recognition_02",
                      "recognition_03", and "recognition_04".
                    "userData": "str"  # Optional. Optional user defined data. Length should not
                      exceed 16K.
                }
        """

    @overload
    def create_face_list(  # pylint: disable=inconsistent-return-statements
        self,
        face_list_id: str,
        *,
        name: str,
        content_type: str = "application/json",
        user_data: Optional[str] = None,
        recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None,
        **kwargs: Any,
    ) -> None:
        """Create an empty Face List with user-specified faceListId, name, an optional userData and
        recognitionModel.

        Up to 64 Face Lists are allowed in one subscription.

        Face List is a list of faces, up to 1,000 faces, and used by "Find Similar From Face List".

        After creation, user should use "Add Face List Face" to import the faces. No image will be
        stored. Only the extracted face feature(s) will be stored on server until "Delete Face List" is
        called.

        "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering,
        or as a light way face identification. But if the actual use is to identify person, please use
        Person Group / Large Person Group and "Identify".

        Please consider Large Face List when the face number is large. It can support up to 1,000,000
        faces.

        :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum
         length is 64. Required.
        :type face_list_id: str
        :keyword name: User defined name, maximum length is 128. Required.
        :paramtype name: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is
         None.
        :paramtype user_data: str
        :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported
         'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and
         'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since
         its accuracy is improved on faces wearing masks compared with 'recognition_03', and its
         overall accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values
         are: "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value
         is None.
        :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def create_face_list(  # pylint: disable=inconsistent-return-statements
        self, face_list_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> None:
        """Create an empty Face List with user-specified faceListId, name, an optional userData and
        recognitionModel.

        Up to 64 Face Lists are allowed in one subscription.

        Face List is a list of faces, up to 1,000 faces, and used by "Find Similar From Face List".

        After creation, user should use "Add Face List Face" to import the faces. No image will be
        stored. Only the extracted face feature(s) will be stored on server until "Delete Face List" is
        called.

        "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering,
        or as a light way face identification. But if the actual use is to identify person, please use
        Person Group / Large Person Group and "Identify".

        Please consider Large Face List when the face number is large. It can support up to 1,000,000
        faces.

        :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum
         length is 64. Required.
        :type face_list_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
+ :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_face_list( # pylint: disable=inconsistent-return-statements + self, + face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any, + ) -> None: + # pylint: disable=line-too-long + """Create an empty Face List with user-specified faceListId, name, an optional userData and + recognitionModel. + + Up to 64 Face Lists are allowed in one subscription. + + Face List is a list of faces, up to 1,000 faces, and used by "Find Similar From Face List". + + After creation, user should use "Add Face List Face" to import the faces. No image will be + stored. Only the extracted face feature(s) will be stored on server until "Delete Face List" is + called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + Please consider Large Face List when the face number is large. It can support up to 1,000,000 + faces. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. 
The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "recognitionModel": recognition_model, "userData": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_face_list_request( + face_list_id=face_list_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def delete_face_list( # pylint: disable=inconsistent-return-statements + self, face_list_id: str, **kwargs: Any + ) -> None: + """Delete a specified Face List. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_face_list_request( + face_list_id=face_list_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + 
@distributed_trace + def get_face_list( + self, face_list_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any + ) -> _models.FaceList: + # pylint: disable=line-too-long + """Retrieve a Face List's faceListId, name, userData, recognitionModel and faces in the Face List. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: FaceList. The FaceList is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceList + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "faceListId": "str", # Valid character is letter in lower case or digit or + '-' or '_', maximum length is 64. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "persistedFaces": [ + { + "persistedFaceId": "str", # Face ID of the face. Required. + "userData": "str" # Optional. User-provided data attached to + the face. The length limit is 1K. + } + ], + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FaceList] = kwargs.pop("cls", None) + + _request = build_face_administration_get_face_list_request( + face_list_id=face_list_id, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceList, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_face_list( # pylint: disable=inconsistent-return-statements + self, face_list_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update information of a Face List, including name and userData. 
+ + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def update_face_list( # pylint: disable=inconsistent-return-statements + self, + face_list_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update information of a Face List, including name and userData. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_face_list( # pylint: disable=inconsistent-return-statements + self, face_list_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update information of a Face List, including name and userData. 
+ + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_face_list( # pylint: disable=inconsistent-return-statements + self, + face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update information of a Face List, including name and userData. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userData": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_face_list_request( + face_list_id=face_list_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + 
@distributed_trace + def get_face_lists( + self, *, return_recognition_model: Optional[bool] = None, **kwargs: Any + ) -> List[_models.FaceListItem]: + # pylint: disable=line-too-long + """List Face Lists' faceListId, name, userData and recognitionModel. + + To get face information inside Face List use "Get Face List". + + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: list of FaceListItem + :rtype: list[~azure.ai.vision.face.models.FaceListItem] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "faceListId": "str", # Valid character is letter in lower case or + digit or '-' or '_', maximum length is 64. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.FaceListItem]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_face_lists_request( + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceListItem], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def add_face_list_face_from_url( + self, + face_list_id: str, + body: JSON, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = 
"application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Face List, up to 1,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Face List + Face" or "Delete Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Required. 
+ :type body: JSON + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @overload + def add_face_list_face_from_url( + self, + face_list_id: str, + *, + url: str, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Face List, up to 1,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Face List + Face" or "Delete Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @overload + def add_face_list_face_from_url( + self, + face_list_id: str, + body: IO[bytes], + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Face List, up to 1,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Face List + Face" or "Delete Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @distributed_trace + def add_face_list_face_from_url( + self, + face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Face List, up to 1,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Face List + Face" or "Delete Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_add_face_list_face_from_url_request( + face_list_id=face_list_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def add_face_list_face( + self, + face_list_id: str, + image_content: bytes, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Face List, up to 1,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Face List + Face" or "Delete Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. 
+ * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param image_content: The image to be analyzed. Required. + :type image_content: bytes + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_administration_add_face_list_face_request( + face_list_id=face_list_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + 
def delete_face_list_face( # pylint: disable=inconsistent-return-statements + self, face_list_id: str, persisted_face_id: str, **kwargs: Any + ) -> None: + """Delete a face from a Face List by specified faceListId and persistedFaceId. + + Adding/deleting faces to/from a same Face List are processed sequentially and to/from different + Face Lists are in parallel. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_face_list_face_request( + face_list_id=face_list_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + 
error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + def create_large_face_list( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create an empty Large Face List with user-specified largeFaceListId, name, an optional userData + and recognitionModel. + + Large Face List is a list of faces, up to 1,000,000 faces, and used by "Find Similar From Large + Face List". + + After creation, user should use Add Large Face List Face to import the faces and Train Large + Face List to make it ready for "Find Similar". No image will be stored. Only the extracted face + feature(s) will be stored on server until Delete Large Face List is called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 64 Large Face Lists. + * S0-tier subscription quota: 1,000,000 Large Face Lists. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. 
+ "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def create_large_face_list( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any, + ) -> None: + """Create an empty Large Face List with user-specified largeFaceListId, name, an optional userData + and recognitionModel. + + Large Face List is a list of faces, up to 1,000,000 faces, and used by "Find Similar From Large + Face List". + + After creation, user should use Add Large Face List Face to import the faces and Train Large + Face List to make it ready for "Find Similar". No image will be stored. Only the extracted face + feature(s) will be stored on server until Delete Large Face List is called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 64 Large Face Lists. + * S0-tier subscription quota: 1,000,000 Large Face Lists. 
+ + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_large_face_list( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Create an empty Large Face List with user-specified largeFaceListId, name, an optional userData + and recognitionModel. + + Large Face List is a list of faces, up to 1,000,000 faces, and used by "Find Similar From Large + Face List". + + After creation, user should use Add Large Face List Face to import the faces and Train Large + Face List to make it ready for "Find Similar". No image will be stored. 
Only the extracted face + feature(s) will be stored on server until Delete Large Face List is called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 64 Large Face Lists. + * S0-tier subscription quota: 1,000,000 Large Face Lists. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_large_face_list( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any, + ) -> None: + # pylint: disable=line-too-long + """Create an empty Large Face List with user-specified largeFaceListId, name, an optional userData + and recognitionModel. + + Large Face List is a list of faces, up to 1,000,000 faces, and used by "Find Similar From Large + Face List". + + After creation, user should use Add Large Face List Face to import the faces and Train Large + Face List to make it ready for "Find Similar". No image will be stored. Only the extracted face + feature(s) will be stored on server until Delete Large Face List is called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. 
But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 64 Large Face Lists. + * S0-tier subscription quota: 1,000,000 Large Face Lists. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 
'recognition_04' is recommended since its
+ accuracy is improved on faces wearing masks compared with 'recognition_03', and
+ its overall accuracy is improved compared with 'recognition_01' and
+ 'recognition_02'. Known values are: "recognition_01", "recognition_02",
+ "recognition_03", and "recognition_04".
+ "userData": "str" # Optional. Optional user defined data. Length should not
+ exceed 16K.
+ }
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ if body is _Unset:
+ if name is _Unset:
+ raise TypeError("missing required argument: name")
+ body = {"name": name, "recognitionModel": recognition_model, "userData": user_data}
+ body = {k: v for k, v in body.items() if v is not None}
+ content_type = content_type or "application/json"
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore
+
+ _request = build_face_administration_create_large_face_list_request(
+ large_face_list_id=large_face_list_id,
+ content_type=content_type,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+ "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint:
disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = _deserialize(_models.FaceErrorResponse, response.json())
+ raise HttpResponseError(response=response, model=error)
+
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ @distributed_trace
+ def delete_large_face_list( # pylint: disable=inconsistent-return-statements
+ self, large_face_list_id: str, **kwargs: Any
+ ) -> None:
+ """Delete a specified Large Face List by largeFaceListId.
+
+ Adding/deleting faces to/from a same Large Face List are processed sequentially and to/from
+ different Large Face Lists are in parallel.
+
+ :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_',
+ maximum length is 64. Required.
+ :type large_face_list_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_large_face_list_request( + large_face_list_id=large_face_list_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_large_face_list( + self, large_face_list_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any + ) -> _models.LargeFaceList: + # pylint: disable=line-too-long + """Retrieve a Large Face List's largeFaceListId, name, userData and recognitionModel. 
+ + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: LargeFaceList. The LargeFaceList is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.LargeFaceList + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "largeFaceListId": "str", # Valid character is letter in lower case or digit + or '-' or '_', maximum length is 64. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LargeFaceList] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_face_list_request( + large_face_list_id=large_face_list_id, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LargeFaceList, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_large_face_list( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update information of a Large Face 
List, including name and userData. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def update_large_face_list( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update information of a Large Face List, including name and userData. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_large_face_list( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update information of a Large Face List, including name and userData. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_large_face_list( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update information of a Large Face List, including name and userData. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. 
+ "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_large_face_list_request( + large_face_list_id=large_face_list_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_large_face_lists( + self, + *, + start: Optional[str] = None, + top: Optional[int] = None, + return_recognition_model: Optional[bool] = None, + **kwargs: Any, + ) -> List[_models.LargeFaceList]: + # pylint: disable=line-too-long + """List Large Face Lists' information of largeFaceListId, name, userData and recognitionModel. + + To get face information inside largeFaceList use "Get Large Face List Face". + + Large Face Lists are stored in alphabetical order of largeFaceListId. + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. 
+ :paramtype return_recognition_model: bool + :return: list of LargeFaceList + :rtype: list[~azure.ai.vision.face.models.LargeFaceList] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "largeFaceListId": "str", # Valid character is letter in lower case + or digit or '-' or '_', maximum length is 64. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LargeFaceList]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_face_lists_request( + start=start, + top=top, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LargeFaceList], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_large_face_list_training_status( + self, large_face_list_id: str, **kwargs: Any + ) -> _models.FaceCollectionTrainingResult: + # pylint: disable=line-too-long + """To check the Large Face List training status completed or still ongoing. Large Face List + training is an asynchronous operation triggered by "Train Large Face List". + + Training time depends on the number of face entries in a Large Face List. It could be in + seconds, or up to half an hour for 1,000,000 faces. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :return: FaceCollectionTrainingResult. The FaceCollectionTrainingResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceCollectionTrainingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "createdDateTime": "2020-02-20 00:00:00", # A combined UTC date and time + string that describes the created time of the person group, large person group or + large face list. Required. 
+ "lastActionDateTime": "2020-02-20 00:00:00", # A combined UTC date and time + string that describes the last modify time of the person group, large person + group or large face list, could be null value when the group is not successfully + trained. Required. + "lastSuccessfulTrainingDateTime": "2020-02-20 00:00:00", # A combined UTC + date and time string that describes the last successful training time of the + person group, large person group or large face list. Required. + "status": "str", # Training status of the container. Required. Known values + are: "notStarted", "running", "succeeded", and "failed". + "message": "str" # Optional. Show failure message when training failed + (omitted when training succeed). + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FaceCollectionTrainingResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_face_list_training_status_request( + large_face_list_id=large_face_list_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceCollectionTrainingResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _train_large_face_list_initial( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, **kwargs: Any + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_train_large_face_list_request( + large_face_list_id=large_face_list_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise 
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def begin_train_large_face_list(self, large_face_list_id: str, **kwargs: Any) -> LROPoller[None]: + """Submit a Large Face List training task. + + Training is a crucial step that only a trained Large Face List can be used by "Find Similar + From Large Face List". + + The training task is an asynchronous task. Training time depends on the number of face entries + in a Large Face List. It could be in seconds, or up to half an hour for 1,000,000 faces. To + check training completion, please use "Get Large Face List Training Status". + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._train_large_face_list_initial( # type: ignore + large_face_list_id=large_face_list_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", 
self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @overload + def add_large_face_list_face_from_url( + self, + large_face_list_id: str, + body: JSON, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Large Face List, up to 1,000,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large Face + List Face" or "Delete Large Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. 
+ * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 faces per Large Face List. + * S0-tier subscription quota: 1,000,000 faces per Large Face List. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: JSON + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @overload + def add_large_face_list_face_from_url( + self, + large_face_list_id: str, + *, + url: str, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Large Face List, up to 1,000,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large Face + List Face" or "Delete Large Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. 
+ * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 faces per Large Face List. + * S0-tier subscription quota: 1,000,000 faces per Large Face List. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @overload + def add_large_face_list_face_from_url( + self, + large_face_list_id: str, + body: IO[bytes], + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Large Face List, up to 1,000,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large Face + List Face" or "Delete Large Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. 
If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 faces per Large Face List. + * S0-tier subscription quota: 1,000,000 faces per Large Face List. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. 
The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @distributed_trace + def add_large_face_list_face_from_url( + self, + large_face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Large Face List, up to 1,000,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large Face + List Face" or "Delete Large Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. 
+ * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 faces per Large Face List. + * S0-tier subscription quota: 1,000,000 faces per Large Face List. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_add_large_face_list_face_from_url_request( + large_face_list_id=large_face_list_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = 
kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def add_large_face_list_face( + self, + large_face_list_id: str, + image_content: bytes, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Large Face List, up to 1,000,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large Face + List Face" or "Delete Large Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. 
The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 faces per Large Face List. + * S0-tier subscription quota: 1,000,000 faces per Large Face List. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param image_content: The image to be analyzed. Required. + :type image_content: bytes + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. 
+ :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_administration_add_large_face_list_face_request( + large_face_list_id=large_face_list_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if 
_stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_large_face_list_face( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, persisted_face_id: str, **kwargs: Any + ) -> None: + """Delete a face from a Large Face List by specified largeFaceListId and persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. 
+ :type persisted_face_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_large_face_list_face_request( + large_face_list_id=large_face_list_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_large_face_list_face( + self, large_face_list_id: str, persisted_face_id: str, **kwargs: Any + ) -> _models.LargeFaceListFace: + """Retrieve persisted face in Large Face List by largeFaceListId and persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. 
Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: LargeFaceListFace. The LargeFaceListFace is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.LargeFaceListFace + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str", # Face ID of the face. Required. + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LargeFaceListFace] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_face_list_face_request( + large_face_list_id=large_face_list_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LargeFaceListFace, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_large_face_list_face( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + persisted_face_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Update a specified face's userData field in a Large Face List by its persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + + @overload + def update_large_face_list_face( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + persisted_face_id: str, + *, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update a specified face's userData field in a Large Face List by its persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. 
+ :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_large_face_list_face( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + persisted_face_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Update a specified face's userData field in a Large Face List by its persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_large_face_list_face( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + persisted_face_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update a specified face's userData field in a Large Face List by its persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. 
Required. + :type persisted_face_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_large_face_list_face_request( + large_face_list_id=large_face_list_id, + persisted_face_id=persisted_face_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) 
+ + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_large_face_list_faces( + self, large_face_list_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.LargeFaceListFace]: + """List faces' persistedFaceId and userData in a specified Large Face List. + + Faces are stored in alphabetical order of persistedFaceId created in "Add Large Face List + Face". + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. 
+ :type large_face_list_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of LargeFaceListFace + :rtype: list[~azure.ai.vision.face.models.LargeFaceListFace] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "persistedFaceId": "str", # Face ID of the face. Required. + "userData": "str" # Optional. User-provided data attached to the + face. The length limit is 1K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LargeFaceListFace]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_face_list_faces_request( + large_face_list_id=large_face_list_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in 
memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LargeFaceListFace], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_person_group( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create a new Person Group with specified personGroupId, name, user-provided userData and + recognitionModel. + + A Person Group is a container holding the uploaded person data, including face recognition + features. + + After creation, use "Create Person Group Person" to add persons into the group, and then call + "Train Person Group" to get this group ready for "Identify From Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Person Group Person" or "Delete Person Group" is called. + + 'recognitionModel' should be specified to associate with this Person Group. The default value + for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Person + Group will use the recognition model that's already associated with the collection. Existing + face feature(s) in a Person Group can't be updated to features extracted by another version of + recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Person Groups. Each holds up to 1,000 persons. + * S0-tier subscription quota: 1,000,000 Person Groups. 
Each holds up to 10,000 persons. + * to handle larger scale face identification problem, please consider using Large Person + Group. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def create_person_group( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any, + ) -> None: + """Create a new Person Group with specified personGroupId, name, user-provided userData and + recognitionModel. + + A Person Group is a container holding the uploaded person data, including face recognition + features. 
+ + After creation, use "Create Person Group Person" to add persons into the group, and then call + "Train Person Group" to get this group ready for "Identify From Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Person Group Person" or "Delete Person Group" is called. + + 'recognitionModel' should be specified to associate with this Person Group. The default value + for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Person + Group will use the recognition model that's already associated with the collection. Existing + face feature(s) in a Person Group can't be updated to features extracted by another version of + recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Person Groups. Each holds up to 1,000 persons. + * S0-tier subscription quota: 1,000,000 Person Groups. Each holds up to 10,000 persons. + * to handle larger scale face identification problem, please consider using Large Person + Group. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 
'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_person_group( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Create a new Person Group with specified personGroupId, name, user-provided userData and + recognitionModel. + + A Person Group is a container holding the uploaded person data, including face recognition + features. + + After creation, use "Create Person Group Person" to add persons into the group, and then call + "Train Person Group" to get this group ready for "Identify From Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Person Group Person" or "Delete Person Group" is called. + + 'recognitionModel' should be specified to associate with this Person Group. The default value + for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Person + Group will use the recognition model that's already associated with the collection. Existing + face feature(s) in a Person Group can't be updated to features extracted by another version of + recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Person Groups. Each holds up to 1,000 persons. + * S0-tier subscription quota: 1,000,000 Person Groups. 
Each holds up to 10,000 persons. + * to handle larger scale face identification problem, please consider using Large Person + Group. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_person_group( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any, + ) -> None: + # pylint: disable=line-too-long + """Create a new Person Group with specified personGroupId, name, user-provided userData and + recognitionModel. + + A Person Group is a container holding the uploaded person data, including face recognition + features. + + After creation, use "Create Person Group Person" to add persons into the group, and then call + "Train Person Group" to get this group ready for "Identify From Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Person Group Person" or "Delete Person Group" is called. + + 'recognitionModel' should be specified to associate with this Person Group. The default value + for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Person + Group will use the recognition model that's already associated with the collection. Existing + face feature(s) in a Person Group can't be updated to features extracted by another version of + recognition model. + + .. 
+ + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Person Groups. Each holds up to 1,000 persons. + * S0-tier subscription quota: 1,000,000 Person Groups. Each holds up to 10,000 persons. + * to handle larger scale face identification problem, please consider using Large Person + Group. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 
'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "recognitionmodel": recognition_model, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_person_group_request( + person_group_id=person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: 
disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def delete_person_group( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, **kwargs: Any + ) -> None: + """Delete an existing Person Group with specified personGroupId. Persisted data in this Person + Group will be deleted. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_person_group_request( + person_group_id=person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_person_group( + self, person_group_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any + ) -> _models.PersonGroup: + # pylint: disable=line-too-long + """Retrieve Person Group name, userData and recognitionModel. To get person information under this + personGroup, use "Get Person Group Persons". + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: PersonGroup. The PersonGroup is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.PersonGroup + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "name": "str", # User defined name, maximum length is 128. Required. + "personGroupId": "str", # ID of the container. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PersonGroup] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_group_request( + person_group_id=person_group_id, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PersonGroup, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_person_group( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update an existing Person Group's name and userData. 
The properties keep unchanged if they are + not in request body. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def update_person_group( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update an existing Person Group's name and userData. The properties keep unchanged if they are + not in request body. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_person_group( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update an existing Person Group's name and userData. 
The properties keep unchanged if they are + not in request body. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_person_group( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update an existing Person Group's name and userData. The properties keep unchanged if they are + not in request body. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_person_group_request( + person_group_id=person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + 
@distributed_trace + def get_person_groups( + self, + *, + start: Optional[str] = None, + top: Optional[int] = None, + return_recognition_model: Optional[bool] = None, + **kwargs: Any, + ) -> List[_models.PersonGroup]: + # pylint: disable=line-too-long + """List Person Groups' personGroupId, name, userData and recognitionModel. + + Person Groups are stored in alphabetical order of personGroupId. + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: list of PersonGroup + :rtype: list[~azure.ai.vision.face.models.PersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "name": "str", # User defined name, maximum length is 128. Required. 
+ "personGroupId": "str", # ID of the container. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.PersonGroup]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_groups_request( + start=start, + top=top, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(List[_models.PersonGroup], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_person_group_training_status( + self, person_group_id: str, **kwargs: Any + ) -> _models.FaceCollectionTrainingResult: + # pylint: disable=line-too-long + """To check Person Group training status completed or still ongoing. Person Group training is an + asynchronous operation triggered by "Train Person Group" API. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :return: FaceCollectionTrainingResult. The FaceCollectionTrainingResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceCollectionTrainingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "createdDateTime": "2020-02-20 00:00:00", # A combined UTC date and time + string that describes the created time of the person group, large person group or + large face list. Required. + "lastActionDateTime": "2020-02-20 00:00:00", # A combined UTC date and time + string that describes the last modify time of the person group, large person + group or large face list, could be null value when the group is not successfully + trained. Required. + "lastSuccessfulTrainingDateTime": "2020-02-20 00:00:00", # A combined UTC + date and time string that describes the last successful training time of the + person group, large person group or large face list. Required. + "status": "str", # Training status of the container. Required. Known values + are: "notStarted", "running", "succeeded", and "failed". + "message": "str" # Optional. Show failure message when training failed + (omitted when training succeed). 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FaceCollectionTrainingResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_group_training_status_request( + person_group_id=person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceCollectionTrainingResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _train_person_group_initial( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, **kwargs: Any + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: 
ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_train_person_group_request( + person_group_id=person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def begin_train_person_group(self, person_group_id: str, **kwargs: Any) -> LROPoller[None]: + """Submit a Person Group training task. Training is a crucial step that only a trained Person + Group can be used by "Identify From Person Group". + + The training task is an asynchronous task. Training time depends on the number of person + entries, and their faces in a Person Group. It could be several seconds to minutes. 
To check + training status, please use "Get Person Group Training Status". + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._train_person_group_initial( # type: ignore + person_group_id=person_group_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @overload + def create_person_group_person( + self, person_group_id: str, body: JSON, *, 
content_type: str = "application/json", **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Person Group. To add face to this person, please call "Add + Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Person Groups. + + * S0-tier subscription quota: + + * 10,000 persons per Person Group. + * 1,000,000 Person Groups. + * 100,000,000 persons in all Person Groups. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @overload + def create_person_group_person( + self, + person_group_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.CreatePersonResult: + """Create a new person in a specified Person Group. To add face to this person, please call "Add + Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Person Groups. + + * S0-tier subscription quota: + + * 10,000 persons per Person Group. + * 1,000,000 Person Groups. + * 100,000,000 persons in all Person Groups. 
+ + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @overload + def create_person_group_person( + self, person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Person Group. To add face to this person, please call "Add + Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Person Groups. + + * S0-tier subscription quota: + + * 10,000 persons per Person Group. + * 1,000,000 Person Groups. + * 100,000,000 persons in all Person Groups. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @distributed_trace + def create_person_group_person( + self, + person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.CreatePersonResult: + """Create a new person in a specified Person Group. To add face to this person, please call "Add + Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Person Groups. + + * S0-tier subscription quota: + + * 10,000 persons per Person Group. + * 1,000,000 Person Groups. + * 100,000,000 persons in all Person Groups. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.CreatePersonResult] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_person_group_person_request( + person_group_id=person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise 
HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.CreatePersonResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_person_group_person( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, person_id: str, **kwargs: Any + ) -> None: + """Delete an existing person from a Person Group. The persistedFaceId, userData, person name and + face feature(s) in the person entry will all be deleted. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_person_group_person_request( + person_group_id=person_group_id, + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if 
response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_person_group_person(self, person_group_id: str, person_id: str, **kwargs: Any) -> _models.PersonGroupPerson: + """Retrieve a person's name and userData, and the persisted faceIds representing the registered + person face feature(s). + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :return: PersonGroupPerson. The PersonGroupPerson is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.PersonGroupPerson + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "name": "str", # User defined name, maximum length is 128. Required. + "personId": "str", # ID of the person. Required. + "persistedFaceIds": [ + "str" # Optional. Face ids of registered faces in the person. + ], + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PersonGroupPerson] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_group_person_request( + person_group_id=person_group_id, + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PersonGroupPerson, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_person_group_person( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, person_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update name or userData of a person. 
+ + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def update_person_group_person( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update name or userData of a person. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_person_group_person( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Update name or userData of a person. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_person_group_person( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update name or userData of a person. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. 
+ "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_person_group_person_request( + person_group_id=person_group_id, + person_id=person_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_person_group_persons( + self, person_group_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.PersonGroupPerson]: + """List all persons' information in the specified Person Group, including personId, name, userData + and persistedFaceIds of registered person faces. + + Persons are stored in alphabetical order of personId created in "Create Person Group Person". + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of PersonGroupPerson + :rtype: list[~azure.ai.vision.face.models.PersonGroupPerson] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == [ + { + "name": "str", # User defined name, maximum length is 128. Required. + "personId": "str", # ID of the person. Required. + "persistedFaceIds": [ + "str" # Optional. Face ids of registered faces in the + person. + ], + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.PersonGroupPerson]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_group_persons_request( + person_group_id=person_group_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(List[_models.PersonGroupPerson], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def add_person_group_person_face_from_url( + self, + person_group_id: str, + person_id: str, + body: JSON, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Person + Group Person Face", "Delete Person Group Person" or "Delete Person Group" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. 
+ Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: JSON + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. 
Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @overload + def add_person_group_person_face_from_url( + self, + person_group_id: str, + person_id: str, + *, + url: str, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Person + Group Person Face", "Delete Person Group Person" or "Delete Person Group" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. 
+ Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @overload + def add_person_group_person_face_from_url( + self, + person_group_id: str, + person_id: str, + body: IO[bytes], + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Person + Group Person Face", "Delete Person Group Person" or "Delete Person Group" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: IO[bytes] + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @distributed_trace + def add_person_group_person_face_from_url( + self, + person_group_id: str, + person_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Person + Group Person Face", "Delete Person Group Person" or "Delete Person Group" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_add_person_group_person_face_from_url_request( + person_group_id=person_group_id, + person_id=person_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def add_person_group_person_face( + self, + person_group_id: str, + person_id: str, + image_content: bytes, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Person + Group Person Face", "Delete Person Group Person" or "Delete Person Group" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. 
+ * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param image_content: The image to be analyzed. Required. + :type image_content: bytes + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. 
Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_administration_add_person_group_person_face_request( + person_group_id=person_group_id, + person_id=person_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, 
response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any + ) -> None: + """Delete a face from a person in a Person Group by specified personGroupId, personId and + persistedFaceId. + + Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_person_group_person_face_request( + person_group_id=person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + 
_request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_person_group_person_face( + self, person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any + ) -> _models.PersonGroupPersonFace: + """Retrieve person face information. The persisted person face is specified by its personGroupId, + personId and persistedFaceId. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: PersonGroupPersonFace. The PersonGroupPersonFace is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.PersonGroupPersonFace + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str", # Face ID of the face. Required. + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PersonGroupPersonFace] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_group_person_face_request( + person_group_id=person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PersonGroupPersonFace, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + persisted_face_id: str, + body: JSON, + *, + content_type: str = 
"application/json", + **kwargs: Any, + ) -> None: + """Update a person persisted face's userData field. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + + @overload + def update_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + persisted_face_id: str, + *, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update a person persisted face's userData field. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + persisted_face_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Update a person persisted face's userData field. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + persisted_face_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update a person persisted face's userData field. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_person_group_person_face_request( + person_group_id=person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + def create_large_person_group( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create a new Large Person Group with user-specified largePersonGroupId, name, an optional + userData and recognitionModel. + + A Large Person Group is a container holding the uploaded person data, including the face + recognition features. It can hold up to 1,000,000 entities. + + After creation, use "Create Large Person Group Person" to add person into the group, and call + "Train Large Person Group" to get this group ready for "Identify From Large Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + 'recognitionModel' should be specified to associate with this Large Person Group. The default + value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Large + Person Group will use the recognition model that's already associated with the collection. + Existing face feature(s) in a Large Person Group can't be updated to features extracted by + another version of recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Large Person Groups. + * S0-tier subscription quota: 1,000,000 Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def create_large_person_group( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any, + ) -> None: + """Create a new Large Person Group with user-specified largePersonGroupId, name, an optional + userData and recognitionModel. + + A Large Person Group is a container holding the uploaded person data, including the face + recognition features. It can hold up to 1,000,000 entities. + + After creation, use "Create Large Person Group Person" to add person into the group, and call + "Train Large Person Group" to get this group ready for "Identify From Large Person Group". + + No image will be stored. 
Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + 'recognitionModel' should be specified to associate with this Large Person Group. The default + value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Large + Person Group will use the recognition model that's already associated with the collection. + Existing face feature(s) in a Large Person Group can't be updated to features extracted by + another version of recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Large Person Groups. + * S0-tier subscription quota: 1,000,000 Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. 
+ :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_large_person_group( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Create a new Large Person Group with user-specified largePersonGroupId, name, an optional + userData and recognitionModel. + + A Large Person Group is a container holding the uploaded person data, including the face + recognition features. It can hold up to 1,000,000 entities. + + After creation, use "Create Large Person Group Person" to add person into the group, and call + "Train Large Person Group" to get this group ready for "Identify From Large Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + 'recognitionModel' should be specified to associate with this Large Person Group. The default + value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Large + Person Group will use the recognition model that's already associated with the collection. + Existing face feature(s) in a Large Person Group can't be updated to features extracted by + another version of recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Large Person Groups. + * S0-tier subscription quota: 1,000,000 Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_large_person_group( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any, + ) -> None: + # pylint: disable=line-too-long + """Create a new Large Person Group with user-specified largePersonGroupId, name, an optional + userData and recognitionModel. + + A Large Person Group is a container holding the uploaded person data, including the face + recognition features. It can hold up to 1,000,000 entities. + + After creation, use "Create Large Person Group Person" to add person into the group, and call + "Train Large Person Group" to get this group ready for "Identify From Large Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + 'recognitionModel' should be specified to associate with this Large Person Group. The default + value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Large + Person Group will use the recognition model that's already associated with the collection. + Existing face feature(s) in a Large Person Group can't be updated to features extracted by + another version of recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Large Person Groups. + * S0-tier subscription quota: 1,000,000 Large Person Groups. + + :param large_person_group_id: ID of the container. Required. 
+ :type large_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "recognitionmodel": recognition_model, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_large_person_group_request( + large_person_group_id=large_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + 
raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def delete_large_person_group( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, **kwargs: Any + ) -> None: + """Delete an existing Large Person Group with specified personGroupId. Persisted data in this + Large Person Group will be deleted. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_large_person_group_request( + large_person_group_id=large_person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_large_person_group( + self, large_person_group_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any + ) -> _models.LargePersonGroup: + # pylint: disable=line-too-long + """Retrieve the information of a Large Person Group, including its name, userData and + recognitionModel. This API returns Large Person Group information only, use "Get Large Person + Group Persons" instead to retrieve person information under the Large Person Group. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: LargePersonGroup. The LargePersonGroup is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.LargePersonGroup + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "largePersonGroupId": "str", # ID of the container. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LargePersonGroup] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_person_group_request( + large_person_group_id=large_person_group_id, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LargePersonGroup, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_large_person_group( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update an 
existing Large Person Group's name and userData. The properties keep unchanged if + they are not in request body. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def update_large_person_group( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update an existing Large Person Group's name and userData. The properties keep unchanged if + they are not in request body. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_large_person_group( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update an existing Large Person Group's name and userData. The properties keep unchanged if + they are not in request body. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_large_person_group( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update an existing Large Person Group's name and userData. The properties keep unchanged if + they are not in request body. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. 
+ "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_large_person_group_request( + large_person_group_id=large_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_large_person_groups( + self, + *, + start: Optional[str] = None, + top: Optional[int] = None, + return_recognition_model: Optional[bool] = None, + **kwargs: Any, + ) -> List[_models.LargePersonGroup]: + # pylint: disable=line-too-long + """List all existing Large Person Groups' largePersonGroupId, name, userData and recognitionModel. + + Large Person Groups are stored in alphabetical order of largePersonGroupId. + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. 
+ :paramtype return_recognition_model: bool + :return: list of LargePersonGroup + :rtype: list[~azure.ai.vision.face.models.LargePersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "largePersonGroupId": "str", # ID of the container. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LargePersonGroup]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_person_groups_request( + start=start, + top=top, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in 
[200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LargePersonGroup], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_large_person_group_training_status( + self, large_person_group_id: str, **kwargs: Any + ) -> _models.FaceCollectionTrainingResult: + # pylint: disable=line-too-long + """To check Large Person Group training status completed or still ongoing. Large Person Group + training is an asynchronous operation triggered by "Train Large Person Group" API. + + Training time depends on the number of person entries, and their faces in a Large Person Group. + It could be in seconds, or up to half an hour for 1,000,000 persons. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :return: FaceCollectionTrainingResult. The FaceCollectionTrainingResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceCollectionTrainingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "createdDateTime": "2020-02-20 00:00:00", # A combined UTC date and time + string that describes the created time of the person group, large person group or + large face list. Required. + "lastActionDateTime": "2020-02-20 00:00:00", # A combined UTC date and time + string that describes the last modify time of the person group, large person + group or large face list, could be null value when the group is not successfully + trained. Required. 
+ "lastSuccessfulTrainingDateTime": "2020-02-20 00:00:00", # A combined UTC + date and time string that describes the last successful training time of the + person group, large person group or large face list. Required. + "status": "str", # Training status of the container. Required. Known values + are: "notStarted", "running", "succeeded", and "failed". + "message": "str" # Optional. Show failure message when training failed + (omitted when training succeed). + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FaceCollectionTrainingResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_person_group_training_status_request( + large_person_group_id=large_person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = 
response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceCollectionTrainingResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _train_large_person_group_initial( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, **kwargs: Any + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_train_large_person_group_request( + large_person_group_id=large_person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) 
# type: ignore + + @distributed_trace + def begin_train_large_person_group(self, large_person_group_id: str, **kwargs: Any) -> LROPoller[None]: + """Submit a Large Person Group training task. Training is a crucial step that only a trained Large + Person Group can be used by "Identify From Large Person Group". + + The training task is an asynchronous task. Training time depends on the number of person + entries, and their faces in a Large Person Group. It could be in several seconds, or up to half + a hour for 1,000,000 persons. To check training status, please use "Get Large Person Group + Training Status". + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._train_large_person_group_initial( # type: ignore + large_person_group_id=large_person_group_id, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs, + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, 
path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @overload + def create_large_person_group_person( + self, large_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Large Person Group. To add face to this person, please call + "Add Large Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Large Person Groups. + + * S0-tier subscription quota: + + * 1,000,000 persons per Large Person Group. + * 1,000,000 Large Person Groups. + * 1,000,000,000 persons in all Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @overload + def create_large_person_group_person( + self, + large_person_group_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.CreatePersonResult: + """Create a new person in a specified Large Person Group. To add face to this person, please call + "Add Large Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Large Person Groups. + + * S0-tier subscription quota: + + * 1,000,000 persons per Large Person Group. + * 1,000,000 Large Person Groups. + * 1,000,000,000 persons in all Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @overload + def create_large_person_group_person( + self, large_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Large Person Group. To add face to this person, please call + "Add Large Person Group Person Face". + + .. 
+ + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Large Person Groups. + + * S0-tier subscription quota: + + * 1,000,000 persons per Large Person Group. + * 1,000,000 Large Person Groups. + * 1,000,000,000 persons in all Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @distributed_trace + def create_large_person_group_person( + self, + large_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.CreatePersonResult: + """Create a new person in a specified Large Person Group. To add face to this person, please call + "Add Large Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Large Person Groups. + + * S0-tier subscription quota: + + * 1,000,000 persons per Large Person Group. + * 1,000,000 Large Person Groups. + * 1,000,000,000 persons in all Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. 
Length should not exceed 16K. Default value is
+            None.
+        :paramtype user_data: str
+        :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping
+        :rtype: ~azure.ai.vision.face.models.CreatePersonResult
+        :raises ~azure.core.exceptions.HttpResponseError:
+
+        Example:
+            .. code-block:: python
+
+                # JSON input template you can fill out and use as your body input.
+                body = {
+                    "name": "str",  # User defined name, maximum length is 128. Required.
+                    "userData": "str"  # Optional. Optional user defined data. Length should not
+                      exceed 16K.
+                }
+
+                # response body for status code(s): 200
+                response == {
+                    "personId": "str"  # Person ID of the person. Required.
+                }
+        """
+        error_map: MutableMapping[int, Type[HttpResponseError]] = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.CreatePersonResult] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            if name is _Unset:
+                raise TypeError("missing required argument: name")
+            # Wire format is camelCase "userData" (see the JSON template above); json.dumps sends
+            # the dict keys verbatim, so a lowercase "userdata" key would be dropped by the service.
+            body = {"name": name, "userData": user_data}
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_face_administration_create_large_person_group_person_request(
+            large_person_group_id=large_person_group_id,
+            content_type=content_type,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str",
skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.CreatePersonResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_large_person_group_person( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, person_id: str, **kwargs: Any + ) -> None: + """Delete an existing person from a Large Person Group. The persistedFaceId, userData, person name + and face feature(s) in the person entry will all be deleted. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. 
+ :type person_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_large_person_group_person_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_large_person_group_person( + self, large_person_group_id: str, person_id: str, **kwargs: Any + ) -> _models.LargePersonGroupPerson: + """Retrieve a person's name and userData, and the persisted faceIds representing the registered + person face feature(s). + + :param large_person_group_id: ID of the container. Required. 
+ :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :return: LargePersonGroupPerson. The LargePersonGroupPerson is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.LargePersonGroupPerson + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "name": "str", # User defined name, maximum length is 128. Required. + "personId": "str", # ID of the person. Required. + "persistedFaceIds": [ + "str" # Optional. Face ids of registered faces in the person. + ], + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LargePersonGroupPerson] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_person_group_person_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the 
socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LargePersonGroupPerson, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_large_person_group_person( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Update name or userData of a person. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def update_large_person_group_person( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update name or userData of a person. + + :param large_person_group_id: ID of the container. Required. 
+ :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_large_person_group_person( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Update name or userData of a person. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_large_person_group_person( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update name or userData of a person. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+        :type body: JSON or IO[bytes]
+        :keyword name: User defined name, maximum length is 128. Default value is None.
+        :paramtype name: str
+        :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is
+         None.
+        :paramtype user_data: str
+        :return: None
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+
+        Example:
+            .. code-block:: python
+
+                # JSON input template you can fill out and use as your body input.
+                body = {
+                    "name": "str",  # Optional. User defined name, maximum length is 128.
+                    "userData": "str"  # Optional. Optional user defined data. Length should not
+                      exceed 16K.
+                }
+        """
+        error_map: MutableMapping[int, Type[HttpResponseError]] = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            # NOTE: the service expects the camelCase wire key "userData" (see the JSON
+            # template above and the sibling person-group operations); a lowercase
+            # "userdata" key would be silently dropped by the service.
+            body = {"name": name, "userData": user_data}
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_face_administration_update_large_person_group_person_request(
+            large_person_group_id=large_person_group_id,
+            person_id=person_id,
+            content_type=content_type,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                response.read()  # Load the body in memory and close the socket
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = _deserialize(_models.FaceErrorResponse, response.json())
+            raise HttpResponseError(response=response, model=error)
+
+        if cls:
+            return cls(pipeline_response, None, {})  # type: ignore
+
+    @distributed_trace
+    def get_large_person_group_persons(
+        self, large_person_group_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
+    ) -> List[_models.LargePersonGroupPerson]:
+        """List all persons' information in the specified Large Person Group, including personId, name,
+        userData and persistedFaceIds of registered person faces.
+
+        Persons are stored in alphabetical order of personId created in "Create Large Person Group
+        Person".
+        >
+        *
+
+
+          * "start" parameter (string, optional) specifies an ID value from which returned entries will
+        have larger IDs based on string comparison. Setting "start" to an empty value indicates that
+        entries should be returned starting from the first item.
+          * "top" parameter (int, optional) determines the maximum number of entries to be returned, with
+        a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit,
+        specify "start" with the personId of the last entry returned in the current call.
+
+        ..
+
+           [!TIP]
+
+
+           * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5".
+
+             * "start=&top=" will return all 5 items.
+             * "start=&top=2" will return "itemId1", "itemId2".
+             * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5".
+ + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of LargePersonGroupPerson + :rtype: list[~azure.ai.vision.face.models.LargePersonGroupPerson] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "name": "str", # User defined name, maximum length is 128. Required. + "personId": "str", # ID of the person. Required. + "persistedFaceIds": [ + "str" # Optional. Face ids of registered faces in the + person. + ], + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LargePersonGroupPerson]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_person_group_persons_request( + large_person_group_id=large_person_group_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = 
self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LargePersonGroupPerson], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def add_large_person_group_person_face_from_url( # pylint: disable=name-too-long + self, + large_person_group_id: str, + person_id: str, + body: JSON, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Large Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large + Person Group Person Face", "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. 
+ * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: JSON + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @overload + def add_large_person_group_person_face_from_url( # pylint: disable=name-too-long + self, + large_person_group_id: str, + person_id: str, + *, + url: str, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Large Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large + Person Group Person Face", "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. 
+ * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @overload + def add_large_person_group_person_face_from_url( # pylint: disable=name-too-long + self, + large_person_group_id: str, + person_id: str, + body: IO[bytes], + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Large Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large + Person Group Person Face", "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. 
The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: IO[bytes] + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. 
The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @distributed_trace + def add_large_person_group_person_face_from_url( # pylint: disable=name-too-long + self, + large_person_group_id: str, + person_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Large Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large + Person Group Person Face", "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. 
If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_add_large_person_group_person_face_from_url_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, 
**path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def add_large_person_group_person_face( + self, + large_person_group_id: str, + person_id: str, + image_content: bytes, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Large Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large + Person Group Person Face", "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. 
+ * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param image_content: The image to be analyzed. Required. + :type image_content: bytes + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. 
The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_administration_add_large_person_group_person_face_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + 
response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_large_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any + ) -> None: + """Delete a face from a person in a Large Person Group by specified largePersonGroupId, personId + and persistedFaceId. + + Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. 
+ :type persisted_face_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_large_person_group_person_face_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_large_person_group_person_face( + self, large_person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any + ) -> _models.LargePersonGroupPersonFace: + """Retrieve person face information. 
The persisted person face is specified by its + largePersonGroupId, personId and persistedFaceId. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: LargePersonGroupPersonFace. The LargePersonGroupPersonFace is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.LargePersonGroupPersonFace + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str", # Face ID of the face. Required. + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LargePersonGroupPersonFace] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_person_group_person_face_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs 
+ ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LargePersonGroupPersonFace, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_large_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + persisted_face_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Update a person persisted face's userData field. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. 
+ } + """ + + @overload + def update_large_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + persisted_face_id: str, + *, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update a person persisted face's userData field. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_large_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + persisted_face_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Update a person persisted face's userData field. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_large_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + persisted_face_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update a person persisted face's userData field. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_large_person_group_person_face_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + 
+ if cls: + return cls(pipeline_response, None, {}) # type: ignore + + def _create_person_initial( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> JSON: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_person_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = _deserialize(JSON, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create_person( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[_models.CreatePersonResult]: + """Creates a new person in a Person Directory. To add face to this person, please call Person + Directory "Add Person Face". + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns CreatePersonResult. The CreatePersonResult is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.CreatePersonResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 202 + response == { + "personId": "str" # Person ID of the person. Required. 
+ } + """ + + @overload + def begin_create_person( + self, *, name: str, content_type: str = "application/json", user_data: Optional[str] = None, **kwargs: Any + ) -> LROPoller[_models.CreatePersonResult]: + """Creates a new person in a Person Directory. To add face to this person, please call Person + Directory "Add Person Face". + + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: An instance of LROPoller that returns CreatePersonResult. The CreatePersonResult is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.CreatePersonResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @overload + def begin_create_person( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[_models.CreatePersonResult]: + """Creates a new person in a Person Directory. To add face to this person, please call Person + Directory "Add Person Face". + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns CreatePersonResult. The CreatePersonResult is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.CreatePersonResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 202 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @distributed_trace + def begin_create_person( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> LROPoller[_models.CreatePersonResult]: + """Creates a new person in a Person Directory. To add face to this person, please call Person + Directory "Add Person Face". + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: An instance of LROPoller that returns CreatePersonResult. The CreatePersonResult is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.CreatePersonResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 202 + response == { + "personId": "str" # Person ID of the person. Required. 
+ } + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.CreatePersonResult] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_person_initial( + body=body, + name=name, + user_data=user_data, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs, + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["operation-Location"] = self._deserialize( + "str", response.headers.get("operation-Location") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = _deserialize(_models.CreatePersonResult, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.CreatePersonResult].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + 
deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.CreatePersonResult]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_person_initial( # pylint: disable=inconsistent-return-statements + self, person_id: str, **kwargs: Any + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_person_request( + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def begin_delete_person(self, person_id: str, **kwargs: 
Any) -> LROPoller[None]: + """Delete an existing person from Person Directory. The persistedFaceId(s), userData, person name + and face feature(s) in the person entry will all be deleted. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_person_initial( # type: ignore + person_id=person_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + 
@distributed_trace + def get_person(self, person_id: str, **kwargs: Any) -> _models.PersonDirectoryPerson: + """Retrieve a person's name and userData from Person Directory. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :return: PersonDirectoryPerson. The PersonDirectoryPerson is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.PersonDirectoryPerson + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "name": "str", # User defined name, maximum length is 128. Required. + "personId": "str", # Person ID of the person. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PersonDirectoryPerson] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_request( + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PersonDirectoryPerson, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_person( # pylint: disable=inconsistent-return-statements + self, person_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def update_person( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update name or userData of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. 
+ :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_person( # pylint: disable=inconsistent-return-statements + self, person_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_person( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update name or userData of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_person_request( + person_id=person_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def 
get_persons( + self, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.PersonDirectoryPerson]: + """List all persons' information in Person Directory, including personId, name, and userData. + + Persons are stored in alphabetical order of personId created in Person Directory "Create + Person". + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of PersonDirectoryPerson + :rtype: list[~azure.ai.vision.face.models.PersonDirectoryPerson] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "name": "str", # User defined name, maximum length is 128. Required. + "personId": "str", # Person ID of the person. Required. + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.PersonDirectoryPerson]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_persons_request( + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.PersonDirectoryPerson], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_dynamic_person_group_references( + self, person_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> _models.ListGroupReferenceResult: + """List all Dynamic Person Groups a person has been referenced by in Person Directory. 
+ + Dynamic Person Groups are stored in alphabetical order of Dynamic Person Group ID created in + Person Directory "Create Dynamic Person Group". + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: ListGroupReferenceResult. The ListGroupReferenceResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.ListGroupReferenceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "dynamicPersonGroupIds": [ + "str" # Array of PersonDirectory DynamicPersonGroup ids. Required. 
+ ] + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListGroupReferenceResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_dynamic_person_group_references_request( + person_id=person_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListGroupReferenceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _add_person_face_from_url_initial( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + 
detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> JSON: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_add_person_face_from_url_request( + person_id=person_id, + recognition_model=recognition_model, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + response.read() # Load the body in memory and close the 
socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = _deserialize(JSON, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_add_person_face_from_url( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + body: JSON, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> LROPoller[_models.AddFaceResult]: + # pylint: disable=line-too-long + """Add a face to a person (see Person Directory "Create Person") for face identification or + verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until Person Directory + "Delete Person Face" or "Delete Person" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. 
+ * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + * + * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + * This is a long running operation. Use Response Header "Operation-Location" to determine when + the AddFace operation has successfully propagated for future requests to "Identify". For + further information about Operation-Locations see "Get Face Operation Status". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param body: Required. + :type body: JSON + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. 
The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns AddFaceResult. The AddFaceResult is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.AddFaceResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 202 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @overload + def begin_add_person_face_from_url( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + *, + url: str, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> LROPoller[_models.AddFaceResult]: + # pylint: disable=line-too-long + """Add a face to a person (see Person Directory "Create Person") for face identification or + verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. 
No image will + be stored. Only the extracted face feature(s) will be stored on server until Person Directory + "Delete Person Face" or "Delete Person" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + * + * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + * This is a long running operation. Use Response Header "Operation-Location" to determine when + the AddFace operation has successfully propagated for future requests to "Identify". For + further information about Operation-Locations see "Get Face Operation Status". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. 
Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns AddFaceResult. The AddFaceResult is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.AddFaceResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @overload + def begin_add_person_face_from_url( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + body: IO[bytes], + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> LROPoller[_models.AddFaceResult]: + # pylint: disable=line-too-long + """Add a face to a person (see Person Directory "Create Person") for face identification or + verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until Person Directory + "Delete Person Face" or "Delete Person" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + * + * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + * This is a long running operation. Use Response Header "Operation-Location" to determine when + the AddFace operation has successfully propagated for future requests to "Identify". For + further information about Operation-Locations see "Get Face Operation Status". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param body: Required. + :type body: IO[bytes] + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns AddFaceResult. 
    @distributed_trace
    def begin_add_person_face_from_url(
        self,
        person_id: str,
        recognition_model: Union[str, _models.FaceRecognitionModel],
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        url: str = _Unset,
        target_face: Optional[List[int]] = None,
        detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None,
        user_data: Optional[str] = None,
        **kwargs: Any,
    ) -> LROPoller[_models.AddFaceResult]:
        # pylint: disable=line-too-long
        """Add a face to a person (see Person Directory "Create Person") for face identification or
        verification.

        Long-running operation: no image is stored, only the extracted face feature(s), until
        "Delete Person Face" or "Delete Person" is called. Use the ``Operation-Location``
        response header to determine when the added face has propagated for "Identify".
        Each person entry can hold up to 248 faces; JPEG/PNG/GIF(first frame)/BMP from 1KB
        to 6MB are accepted, detectable face size 36x36 - 4096x4096 pixels.

        :param person_id: Person ID of the person. Required.
        :type person_id: str
        :param recognition_model: The 'recognitionModel' associated with faces. Known values
         are: "recognition_01", "recognition_02", "recognition_03", and "recognition_04".
         Required.
        :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel
        :param body: Is either a JSON type (``{"url": "..."}``) or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword url: URL of input image. Required when ``body`` is not given.
        :paramtype url: str
        :keyword target_face: A face rectangle to specify the target face to be added to a
         person, in the format of 'targetFace=left,top,width,height'. Default value is None.
        :paramtype target_face: list[int]
        :keyword detection_model: The 'detectionModel' associated with the detected faceIds.
         Known values are: "detection_01", "detection_02", and "detection_03". Default value
         is None (service default 'detection_01').
        :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel
        :keyword user_data: User-provided data attached to the face. The size limit is 1K.
         Default value is None.
        :paramtype user_data: str
        :return: An instance of LROPoller that returns AddFaceResult. The AddFaceResult is
         compatible with MutableMapping
        :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.AddFaceResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None)
        # polling: True -> LROBasePolling, False -> no polling, or a caller-supplied method.
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._add_person_face_from_url_initial(
                person_id=person_id,
                recognition_model=recognition_model,
                body=body,
                url=url,
                target_face=target_face,
                detection_model=detection_model,
                user_data=user_data,
                content_type=content_type,
                cls=lambda x, y, z: x,  # keep the raw PipelineResponse for the poller
                headers=_headers,
                params=_params,
                **kwargs,
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Final-state callback: deserialize the terminal response into AddFaceResult.
            response_headers = {}
            response = pipeline_response.http_response
            response_headers["operation-Location"] = self._deserialize(
                "str", response.headers.get("operation-Location")
            )
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))

            deserialized = _deserialize(_models.AddFaceResult, response.json())
            if cls:
                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
            return deserialized

        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }

        if polling is True:
            polling_method: PollingMethod = cast(
                PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller[_models.AddFaceResult].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[_models.AddFaceResult](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )

    def _add_person_face_initial(
        self,
        person_id: str,
        recognition_model: Union[str, _models.FaceRecognitionModel],
        image_content: bytes,
        *,
        target_face: Optional[List[int]] = None,
        detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None,
        user_data: Optional[str] = None,
        **kwargs: Any,
    ) -> JSON:
        # Initial request of the begin_add_person_face LRO: posts raw image bytes and
        # returns the raw 202 payload; the operation-Location header drives polling.
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        # Binary upload: defaults to application/octet-stream rather than JSON.
        content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream"))
        cls: ClsType[JSON] = kwargs.pop("cls", None)

        _content = image_content

        _request = build_face_administration_add_person_face_request(
            person_id=person_id,
            recognition_model=recognition_model,
            target_face=target_face,
            detection_model=detection_model,
            user_data=user_data,
            content_type=content_type,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # 202 Accepted is the only success status for LRO initiation.
        if response.status_code not in [202]:
            if _stream:
                response.read()  # Load the body in memory and close the socket
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location"))
        response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))

        deserialized = _deserialize(JSON, response.json())

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore
+ * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + * + * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + * This is a long running operation. Use Response Header "Operation-Location" to determine when + the AddFace operation has successfully propagated for future requests to "Identify". For + further information about Operation-Locations see "Get Face Operation Status". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param image_content: The image to be analyzed. Required. + :type image_content: bytes + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. 
+ :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: An instance of LROPoller that returns AddFaceResult. The AddFaceResult is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.AddFaceResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._add_person_face_initial( + person_id=person_id, + recognition_model=recognition_model, + image_content=image_content, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs, + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["operation-Location"] = self._deserialize( + "str", response.headers.get("operation-Location") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = _deserialize(_models.AddFaceResult, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return 
LROPoller[_models.AddFaceResult].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.AddFaceResult]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_person_face_initial( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: str, + **kwargs: Any, + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_person_face_request( + person_id=person_id, + recognition_model=recognition_model, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise 
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def begin_delete_person_face( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: str, + **kwargs: Any, + ) -> LROPoller[None]: + """Delete a face from a person in Person Directory by specified personId and persistedFaceId. + + Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param persisted_face_id: Face ID of the face. Required. 
+ :type persisted_face_id: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_person_face_initial( # type: ignore + person_id=person_id, + recognition_model=recognition_model, + persisted_face_id=persisted_face_id, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs, + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def get_person_face( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: 
str, + **kwargs: Any, + ) -> _models.PersonDirectoryFace: + """Retrieve person face information. The persisted person face is specified by its personId. + recognitionModel, and persistedFaceId. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: PersonDirectoryFace. The PersonDirectoryFace is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.PersonDirectoryFace + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str", # Face ID of the face. Required. + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PersonDirectoryFace] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_face_request( + person_id=person_id, + recognition_model=recognition_model, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PersonDirectoryFace, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_person_face( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: str, + body: JSON, + *, + 
content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Update a persisted face's userData field of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + + @overload + def update_person_face( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: str, + *, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update a persisted face's userData field of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_person_face( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Update a persisted face's userData field of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_person_face( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update a persisted face's userData field of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. 
Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_person_face_request( + person_id=person_id, + recognition_model=recognition_model, + persisted_face_id=persisted_face_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": 
self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_person_faces( + self, person_id: str, recognition_model: Union[str, _models.FaceRecognitionModel], **kwargs: Any + ) -> _models.ListFaceResult: + """Retrieve a person's persistedFaceIds representing the registered person face feature(s). + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: ListFaceResult. The ListFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.ListFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceIds": [ + "str" # Array of persisted face ids. Required. + ], + "personId": "str" # Id of person. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListFaceResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_faces_request( + person_id=person_id, + recognition_model=recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _create_dynamic_person_group_with_person_initial( # pylint: disable=inconsistent-return-statements,name-too-long + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + add_person_ids: List[str] = _Unset, + user_data: 
Optional[str] = None, + **kwargs: Any, + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + if add_person_ids is _Unset: + raise TypeError("missing required argument: add_person_ids") + body = {"addpersonids": add_person_ids, "name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_dynamic_person_group_with_person_request( + dynamic_person_group_id=dynamic_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @overload + def begin_create_dynamic_person_group_with_person( # pylint: disable=name-too-long + self, dynamic_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[_models.DynamicPersonGroup]: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns DynamicPersonGroup. The DynamicPersonGroup is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.DynamicPersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "addPersonIds": [ + "str" # Array of personIds created by Person Directory "Create + Person" to be added. Required. + ], + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 202 + response == { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def begin_create_dynamic_person_group_with_person( # pylint: disable=name-too-long + self, + dynamic_person_group_id: str, + *, + name: str, + add_person_ids: List[str], + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any, + ) -> LROPoller[_models.DynamicPersonGroup]: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. 
Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword add_person_ids: Array of personIds created by Person Directory "Create Person" to be + added. Required. + :paramtype add_person_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: An instance of LROPoller that returns DynamicPersonGroup. The DynamicPersonGroup is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.DynamicPersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + + @overload + def begin_create_dynamic_person_group_with_person( # pylint: disable=name-too-long + self, dynamic_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[_models.DynamicPersonGroup]: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns DynamicPersonGroup. The DynamicPersonGroup is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.DynamicPersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 202 + response == { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @distributed_trace + def begin_create_dynamic_person_group_with_person( # pylint: disable=name-too-long + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + add_person_ids: List[str] = _Unset, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> LROPoller[_models.DynamicPersonGroup]: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword add_person_ids: Array of personIds created by Person Directory "Create Person" to be + added. Required. + :paramtype add_person_ids: list[str] + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: An instance of LROPoller that returns DynamicPersonGroup. The DynamicPersonGroup is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.ai.vision.face.models.DynamicPersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "addPersonIds": [ + "str" # Array of personIds created by Person Directory "Create + Person" to be added. Required. + ], + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 202 + response == { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DynamicPersonGroup] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_dynamic_person_group_with_person_initial( # type: ignore + dynamic_person_group_id=dynamic_person_group_id, + body=body, + name=name, + add_person_ids=add_person_ids, + user_data=user_data, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs, + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["operation-Location"] = self._deserialize( + "str", response.headers.get("operation-Location") + ) + + deserialized = _deserialize(_models.DynamicPersonGroup, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.DynamicPersonGroup].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + 
client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.DynamicPersonGroup]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + @overload + def create_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, dynamic_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + def create_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, + dynamic_person_group_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, dynamic_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_dynamic_person_group_request( + dynamic_person_group_id=dynamic_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if 
_stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + def _delete_dynamic_person_group_initial( # pylint: disable=inconsistent-return-statements + self, dynamic_person_group_id: str, **kwargs: Any + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_dynamic_person_group_request( + dynamic_person_group_id=dynamic_person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = 
self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def begin_delete_dynamic_person_group(self, dynamic_person_group_id: str, **kwargs: Any) -> LROPoller[None]: + """Deletes an existing Dynamic Person Group with specified dynamicPersonGroupId. + + Deleting this Dynamic Person Group only delete the references to persons data. To delete actual + person see Person Directory "Delete Person". + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_dynamic_person_group_initial( # type: ignore + dynamic_person_group_id=dynamic_person_group_id, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs, + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + 
polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def get_dynamic_person_group(self, dynamic_person_group_id: str, **kwargs: Any) -> _models.DynamicPersonGroup: + """Retrieve the information of a Dynamic Person Group, including its name and userData. + + This API returns Dynamic Person Group information only, use Person Directory "Get Dynamic + Person Group Persons" instead to retrieve person information under the Dynamic Person Group. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :return: DynamicPersonGroup. The DynamicPersonGroup is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.DynamicPersonGroup + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.DynamicPersonGroup] = kwargs.pop("cls", None) + + _request = build_face_administration_get_dynamic_person_group_request( + dynamic_person_group_id=dynamic_person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.DynamicPersonGroup, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _update_dynamic_person_group_with_person_changes_initial( # pylint: disable=inconsistent-return-statements,name-too-long + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, 
+ add_person_ids: Optional[List[str]] = None, + remove_person_ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "addpersonids": add_person_ids, + "name": name, + "removepersonids": remove_person_ids, + "userdata": user_data, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_dynamic_person_group_with_person_changes_request( + dynamic_person_group_id=dynamic_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @overload + def begin_update_dynamic_person_group_with_person_changes( # pylint: disable=name-too-long + self, dynamic_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[None]: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "addPersonIds": [ + "str" # Optional. Array of personIds created by Person Directory + "Create Person" to be added. + ], + "name": "str", # Optional. User defined name, maximum length is 128. + "removePersonIds": [ + "str" # Optional. Array of personIds created by Person Directory + "Create Person" to be removed. + ], + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + + @overload + def begin_update_dynamic_person_group_with_person_changes( # pylint: disable=name-too-long + self, + dynamic_person_group_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + add_person_ids: Optional[List[str]] = None, + remove_person_ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> LROPoller[None]: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword add_person_ids: Array of personIds created by Person Directory "Create Person" to be + added. Default value is None. + :paramtype add_person_ids: list[str] + :keyword remove_person_ids: Array of personIds created by Person Directory "Create Person" to + be removed. Default value is None. + :paramtype remove_person_ids: list[str] + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update_dynamic_person_group_with_person_changes( # pylint: disable=name-too-long + self, dynamic_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[None]: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. 
+ + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update_dynamic_person_group_with_person_changes( # pylint: disable=name-too-long + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + add_person_ids: Optional[List[str]] = None, + remove_person_ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> LROPoller[None]: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword add_person_ids: Array of personIds created by Person Directory "Create Person" to be + added. Default value is None. + :paramtype add_person_ids: list[str] + :keyword remove_person_ids: Array of personIds created by Person Directory "Create Person" to + be removed. Default value is None. 
+ :paramtype remove_person_ids: list[str] + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "addPersonIds": [ + "str" # Optional. Array of personIds created by Person Directory + "Create Person" to be added. + ], + "name": "str", # Optional. User defined name, maximum length is 128. + "removePersonIds": [ + "str" # Optional. Array of personIds created by Person Directory + "Create Person" to be removed. + ], + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_dynamic_person_group_with_person_changes_initial( # type: ignore + dynamic_person_group_id=dynamic_person_group_id, + body=body, + name=name, + user_data=user_data, + add_person_ids=add_person_ids, + remove_person_ids=remove_person_ids, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs, + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": 
self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @overload + def update_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, dynamic_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + + @overload + def update_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, + dynamic_person_group_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, dynamic_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_dynamic_person_group_request( + dynamic_person_group_id=dynamic_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # 
type: ignore + + @distributed_trace + def get_dynamic_person_groups( + self, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.DynamicPersonGroup]: + """List all existing Dynamic Person Groups by dynamicPersonGroupId along with name and userData. + + Dynamic Person Groups are stored in alphabetical order of dynamicPersonGroupId. + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of DynamicPersonGroup + :rtype: list[~azure.ai.vision.face.models.DynamicPersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. + Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.DynamicPersonGroup]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_dynamic_person_groups_request( + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.DynamicPersonGroup], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_dynamic_person_group_persons( + self, dynamic_person_group_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> _models.ListPersonResult: + """List all persons in the specified Dynamic Person Group. 
+ + Persons are stored in alphabetical order of personId created in Person Directory "Create + Person". + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: ListPersonResult. The ListPersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.ListPersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "personIds": [ + "str" # Array of PersonDirectory Person ids. Required. 
+ ] + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListPersonResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_dynamic_person_group_persons_request( + dynamic_person_group_id=dynamic_person_group_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListPersonResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class FaceSessionClientOperationsMixin(FaceSessionClientMixinABC): + + @overload + def create_liveness_session( + self, body: _models.CreateLivenessSessionContent, *, content_type: str = "application/json", **kwargs: Any + ) -> 
_models.CreateLivenessSessionResult: + # pylint: disable=line-too-long + """Create a new detect liveness session. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLiveness/singleModal for up to 3 retries. + * A token lifetime of 10 minutes. + + .. + + [!NOTE] + Client access can be revoked by deleting the session using the Delete Liveness Session + operation. To retrieve a result, use the Get Liveness Session. To audit the individual requests + that a client has made to your resource, use the List Liveness Session Audit Entries. + + :param body: Required. + :type body: ~azure.ai.vision.face.models.CreateLivenessSessionContent + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: CreateLivenessSessionResult. The CreateLivenessSessionResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessSessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "livenessOperationMode": "str", # Type of liveness mode the client should + follow. Required. "Passive" + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session should + last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each end-user + device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. 
+ "deviceCorrelationIdSetInClient": bool, # Optional. Whether or not to allow + client to set their own 'deviceCorrelationId' via the Vision SDK. Default is + false, and 'deviceCorrelationId' must be set in this request body. + "sendResultsToClient": bool # Optional. Whether or not to allow a '200 - + Success' response body to be sent to the client, which may be undesirable for + security reasons. Default is false, clients will receive a '204 - NoContent' + empty body response. Regardless of selection, calling Session GetResult will + always contain a response body enabling business logic to be implemented. + } + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str" # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. + } + """ + + @overload + def create_liveness_session( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessSessionResult: + # pylint: disable=line-too-long + """Create a new detect liveness session. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLiveness/singleModal for up to 3 retries. + * A token lifetime of 10 minutes. + + .. + + [!NOTE] + Client access can be revoked by deleting the session using the Delete Liveness Session + operation. 
To retrieve a result, use the Get Liveness Session. To audit the individual requests + that a client has made to your resource, use the List Liveness Session Audit Entries. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: CreateLivenessSessionResult. The CreateLivenessSessionResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessSessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str" # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. + } + """ + + @overload + def create_liveness_session( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessSessionResult: + # pylint: disable=line-too-long + """Create a new detect liveness session. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLiveness/singleModal for up to 3 retries. + * A token lifetime of 10 minutes. + + .. 
+ + [!NOTE] + Client access can be revoked by deleting the session using the Delete Liveness Session + operation. To retrieve a result, use the Get Liveness Session. To audit the individual requests + that a client has made to your resource, use the List Liveness Session Audit Entries. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: CreateLivenessSessionResult. The CreateLivenessSessionResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessSessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str" # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. + } + """ + + @distributed_trace + def create_liveness_session( + self, body: Union[_models.CreateLivenessSessionContent, JSON, IO[bytes]], **kwargs: Any + ) -> _models.CreateLivenessSessionResult: + # pylint: disable=line-too-long + """Create a new detect liveness session. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLiveness/singleModal for up to 3 retries. 
+ * A token lifetime of 10 minutes. + + .. + + [!NOTE] + Client access can be revoked by deleting the session using the Delete Liveness Session + operation. To retrieve a result, use the Get Liveness Session. To audit the individual requests + that a client has made to your resource, use the List Liveness Session Audit Entries. + + :param body: Is one of the following types: CreateLivenessSessionContent, JSON, IO[bytes] + Required. + :type body: ~azure.ai.vision.face.models.CreateLivenessSessionContent or JSON or IO[bytes] + :return: CreateLivenessSessionResult. The CreateLivenessSessionResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessSessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "livenessOperationMode": "str", # Type of liveness mode the client should + follow. Required. "Passive" + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session should + last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each end-user + device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "deviceCorrelationIdSetInClient": bool, # Optional. Whether or not to allow + client to set their own 'deviceCorrelationId' via the Vision SDK. Default is + false, and 'deviceCorrelationId' must be set in this request body. + "sendResultsToClient": bool # Optional. Whether or not to allow a '200 - + Success' response body to be sent to the client, which may be undesirable for + security reasons. Default is false, clients will receive a '204 - NoContent' + empty body response. Regardless of selection, calling Session GetResult will + always contain a response body enabling business logic to be implemented. 
+ } + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str" # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.CreateLivenessSessionResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_session_create_liveness_session_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.CreateLivenessSessionResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_liveness_session( # pylint: disable=inconsistent-return-statements + self, session_id: str, **kwargs: Any + ) -> None: + """Delete all session related information for matching the specified session id. + + .. + + [!NOTE] + Deleting a session deactivates the Session Auth Token by blocking future API calls made with + that Auth Token. While this can be used to remove any access for that token, those requests + will still count towards overall resource rate limits. It's best to leverage TokenTTL to limit + length of tokens in the case that it is misused. + + :param session_id: The unique ID to reference this session. Required. 
+ :type session_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_session_delete_liveness_session_request( + session_id=session_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_liveness_session_result(self, session_id: str, **kwargs: Any) -> _models.LivenessSession: + # pylint: disable=line-too-long + """Get session result of detectLiveness/singleModal call. + + :param session_id: The unique ID to reference this session. Required. + :type session_id: str + :return: LivenessSession. 
The LivenessSession is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.LivenessSession + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "createdDateTime": "2020-02-20 00:00:00", # DateTime when this session was + created. Required. + "id": "str", # The unique ID to reference this session. Required. + "sessionExpired": bool, # Whether or not the session is expired. Required. + "status": "str", # The current status of the session. Required. Known values + are: "NotStarted", "Started", and "ResultAvailable". + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session should + last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each end-user + device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "result": { + "clientRequestId": "str", # The unique clientRequestId that is sent + by the client in the 'client-request-id' header. Required. + "digest": "str", # The server calculated digest for this request. If + the client reported digest differs from the server calculated digest, then + the message integrity between the client and service has been compromised and + the result should not be trusted. For more information, see how to guides on + how to leverage this value to secure your end-to-end solution. Required. + "id": 0, # The unique id to refer to this audit request. Use this id + with the 'start' query parameter to continue on to the next page of audit + results. Required. + "receivedDateTime": "2020-02-20 00:00:00", # The UTC DateTime that + the request was received. Required. + "request": { + "contentType": "str", # The content type of the request. + Required. + "method": "str", # The HTTP method of the request (i.e., + GET, POST, DELETE). Required. 
+ "url": "str", # The relative URL and query of the liveness + request. Required. + "contentLength": 0, # Optional. The length of the request + body in bytes. + "userAgent": "str" # Optional. The user agent used to submit + the request. + }, + "requestId": "str", # The unique requestId that is returned by the + service to the client in the 'apim-request-id' header. Required. + "response": { + "body": { + "livenessDecision": "str", # Optional. The liveness + classification for the target face. Known values are: "uncertain", + "realface", and "spoofface". + "modelVersionUsed": "str", # Optional. The model + version used for liveness classification. Known values are: + "2020-02-15-preview.01", "2021-11-12-preview.03", + "2022-10-15-preview.04", and "2023-03-02-preview.05". + "target": { + "faceRectangle": { + "height": 0, # The height of the + rectangle, in pixels. Required. + "left": 0, # The distance from the + left edge if the image to the left edge of the rectangle, in + pixels. Required. + "top": 0, # The distance from the + top edge if the image to the top edge of the rectangle, in + pixels. Required. + "width": 0 # The width of the + rectangle, in pixels. Required. + }, + "fileName": "str", # The file name which + contains the face rectangle where the liveness classification was + made on. Required. + "imageType": "str", # The image type which + contains the face rectangle where the liveness classification was + made on. Required. Known values are: "Color", "Infrared", and + "Depth". + "timeOffsetWithinFile": 0 # The time offset + within the file of the frame which contains the face rectangle + where the liveness classification was made on. Required. + }, + "verifyResult": { + "isIdentical": bool, # Whether the target + liveness face and comparison image face match. Required. + "matchConfidence": 0.0, # The target face + liveness face and comparison image face verification confidence. + Required. 
+ "verifyImage": { + "faceRectangle": { + "height": 0, # The height of + the rectangle, in pixels. Required. + "left": 0, # The distance + from the left edge if the image to the left edge of the + rectangle, in pixels. Required. + "top": 0, # The distance + from the top edge if the image to the top edge of the + rectangle, in pixels. Required. + "width": 0 # The width of + the rectangle, in pixels. Required. + }, + "qualityForRecognition": "str" # + Quality of face image for recognition. Required. Known values + are: "low", "medium", and "high". + } + } + }, + "latencyInMilliseconds": 0, # The server measured latency + for this request in milliseconds. Required. + "statusCode": 0 # The HTTP status code returned to the + client. Required. + }, + "sessionId": "str" # The unique sessionId of the created session. It + will expire 48 hours after it was created or may be deleted sooner using the + corresponding session DELETE operation. Required. + }, + "sessionStartDateTime": "2020-02-20 00:00:00" # Optional. DateTime when this + session was started by the client. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LivenessSession] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_session_result_request( + session_id=session_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LivenessSession, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_liveness_sessions( + self, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.LivenessSessionItem]: + # pylint: disable=line-too-long + """Lists sessions for /detectLiveness/SingleModal. 
+ + List sessions from the last sessionId greater than the 'start'. + + The result should be ordered by sessionId in ascending order. + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of LivenessSessionItem + :rtype: list[~azure.ai.vision.face.models.LivenessSessionItem] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "createdDateTime": "2020-02-20 00:00:00", # DateTime when this + session was created. Required. + "id": "str", # The unique ID to reference this session. Required. + "sessionExpired": bool, # Whether or not the session is expired. + Required. + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session + should last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each + end-user device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "sessionStartDateTime": "2020-02-20 00:00:00" # Optional. DateTime + when this session was started by the client. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LivenessSessionItem]] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_sessions_request( + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LivenessSessionItem], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_liveness_session_audit_entries( + self, session_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.LivenessSessionAuditEntry]: + # pylint: disable=line-too-long + """Gets session requests and response body for 
the session. + + :param session_id: The unique ID to reference this session. Required. + :type session_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of LivenessSessionAuditEntry + :rtype: list[~azure.ai.vision.face.models.LivenessSessionAuditEntry] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "clientRequestId": "str", # The unique clientRequestId that is sent + by the client in the 'client-request-id' header. Required. + "digest": "str", # The server calculated digest for this request. If + the client reported digest differs from the server calculated digest, then + the message integrity between the client and service has been compromised and + the result should not be trusted. For more information, see how to guides on + how to leverage this value to secure your end-to-end solution. Required. + "id": 0, # The unique id to refer to this audit request. Use this id + with the 'start' query parameter to continue on to the next page of audit + results. Required. + "receivedDateTime": "2020-02-20 00:00:00", # The UTC DateTime that + the request was received. Required. + "request": { + "contentType": "str", # The content type of the request. + Required. + "method": "str", # The HTTP method of the request (i.e., + GET, POST, DELETE). Required. + "url": "str", # The relative URL and query of the liveness + request. Required. + "contentLength": 0, # Optional. The length of the request + body in bytes. + "userAgent": "str" # Optional. The user agent used to submit + the request. 
+ }, + "requestId": "str", # The unique requestId that is returned by the + service to the client in the 'apim-request-id' header. Required. + "response": { + "body": { + "livenessDecision": "str", # Optional. The liveness + classification for the target face. Known values are: "uncertain", + "realface", and "spoofface". + "modelVersionUsed": "str", # Optional. The model + version used for liveness classification. Known values are: + "2020-02-15-preview.01", "2021-11-12-preview.03", + "2022-10-15-preview.04", and "2023-03-02-preview.05". + "target": { + "faceRectangle": { + "height": 0, # The height of the + rectangle, in pixels. Required. + "left": 0, # The distance from the + left edge if the image to the left edge of the rectangle, in + pixels. Required. + "top": 0, # The distance from the + top edge if the image to the top edge of the rectangle, in + pixels. Required. + "width": 0 # The width of the + rectangle, in pixels. Required. + }, + "fileName": "str", # The file name which + contains the face rectangle where the liveness classification was + made on. Required. + "imageType": "str", # The image type which + contains the face rectangle where the liveness classification was + made on. Required. Known values are: "Color", "Infrared", and + "Depth". + "timeOffsetWithinFile": 0 # The time offset + within the file of the frame which contains the face rectangle + where the liveness classification was made on. Required. + }, + "verifyResult": { + "isIdentical": bool, # Whether the target + liveness face and comparison image face match. Required. + "matchConfidence": 0.0, # The target face + liveness face and comparison image face verification confidence. + Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of + the rectangle, in pixels. Required. + "left": 0, # The distance + from the left edge if the image to the left edge of the + rectangle, in pixels. Required. 
+ "top": 0, # The distance + from the top edge if the image to the top edge of the + rectangle, in pixels. Required. + "width": 0 # The width of + the rectangle, in pixels. Required. + }, + "qualityForRecognition": "str" # + Quality of face image for recognition. Required. Known values + are: "low", "medium", and "high". + } + } + }, + "latencyInMilliseconds": 0, # The server measured latency + for this request in milliseconds. Required. + "statusCode": 0 # The HTTP status code returned to the + client. Required. + }, + "sessionId": "str" # The unique sessionId of the created session. It + will expire 48 hours after it was created or may be deleted sooner using the + corresponding session DELETE operation. Required. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LivenessSessionAuditEntry]] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_session_audit_entries_request( + session_id=session_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the 
socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LivenessSessionAuditEntry], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def _create_liveness_with_verify_session( + self, body: _models.CreateLivenessSessionContent, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: ... + @overload + def _create_liveness_with_verify_session( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: ... + @overload + def _create_liveness_with_verify_session( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: ... + + @distributed_trace + def _create_liveness_with_verify_session( + self, body: Union[_models.CreateLivenessSessionContent, JSON, IO[bytes]], **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: + # pylint: disable=line-too-long + """Create a new liveness session with verify. Client device submits VerifyImage during the + /detectLivenessWithVerify/singleModal call. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLivenessWithVerify/singleModal for up to 3 retries. + * A token lifetime of 10 minutes. + + .. 
+ + [!NOTE] + + * + + + * Client access can be revoked by deleting the session using the Delete Liveness With Verify + Session operation. + * To retrieve a result, use the Get Liveness With Verify Session. + * To audit the individual requests that a client has made to your resource, use the List + Liveness With Verify Session Audit Entries. + + + Alternative Option: Client device submits VerifyImage during the + /detectLivenessWithVerify/singleModal call. + + .. + + [!NOTE] + Extra measures should be taken to validate that the client is sending the expected + VerifyImage. + + :param body: Is one of the following types: CreateLivenessSessionContent, JSON, IO[bytes] + Required. + :type body: ~azure.ai.vision.face.models.CreateLivenessSessionContent or JSON or IO[bytes] + :return: CreateLivenessWithVerifySessionResult. The CreateLivenessWithVerifySessionResult is + compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessWithVerifySessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "livenessOperationMode": "str", # Type of liveness mode the client should + follow. Required. "Passive" + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session should + last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each end-user + device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "deviceCorrelationIdSetInClient": bool, # Optional. Whether or not to allow + client to set their own 'deviceCorrelationId' via the Vision SDK. Default is + false, and 'deviceCorrelationId' must be set in this request body. + "sendResultsToClient": bool # Optional. 
Whether or not to allow a '200 - + Success' response body to be sent to the client, which may be undesirable for + security reasons. Default is false, clients will receive a '204 - NoContent' + empty body response. Regardless of selection, calling Session GetResult will + always contain a response body enabling business logic to be implemented. + } + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str", # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of the rectangle, in pixels. + Required. + "left": 0, # The distance from the left edge if the image to + the left edge of the rectangle, in pixels. Required. + "top": 0, # The distance from the top edge if the image to + the top edge of the rectangle, in pixels. Required. + "width": 0 # The width of the rectangle, in pixels. + Required. + }, + "qualityForRecognition": "str" # Quality of face image for + recognition. Required. Known values are: "low", "medium", and "high". 
+ } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.CreateLivenessWithVerifySessionResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_session_create_liveness_with_verify_session_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.CreateLivenessWithVerifySessionResult, response.json()) + + if 
cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def _create_liveness_with_verify_session_with_verify_image( # pylint: disable=protected-access,name-too-long + self, body: _models._models.CreateLivenessWithVerifySessionContent, **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: ... + @overload + def _create_liveness_with_verify_session_with_verify_image( # pylint: disable=name-too-long + self, body: JSON, **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: ... + + @distributed_trace + def _create_liveness_with_verify_session_with_verify_image( # pylint: disable=name-too-long + self, body: Union[_models._models.CreateLivenessWithVerifySessionContent, JSON], **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: + # pylint: disable=line-too-long + """Create a new liveness session with verify. Provide the verify image during session creation. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLivenessWithVerify/singleModal for up to 3 retries. + * A token lifetime of 10 minutes. + + .. + + [!NOTE] + + * + + + * Client access can be revoked by deleting the session using the Delete Liveness With Verify + Session operation. + * To retrieve a result, use the Get Liveness With Verify Session. + * To audit the individual requests that a client has made to your resource, use the List + Liveness With Verify Session Audit Entries. + + + Recommended Option: VerifyImage is provided during session creation. + + :param body: Is either a CreateLivenessWithVerifySessionContent type or a JSON type. Required. 
+ :type body: ~azure.ai.vision.face.models._models.CreateLivenessWithVerifySessionContent or JSON + :return: CreateLivenessWithVerifySessionResult. The CreateLivenessWithVerifySessionResult is + compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessWithVerifySessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "Parameters": { + "livenessOperationMode": "str", # Type of liveness mode the client + should follow. Required. "Passive" + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session + should last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each + end-user device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "deviceCorrelationIdSetInClient": bool, # Optional. Whether or not + to allow client to set their own 'deviceCorrelationId' via the Vision SDK. + Default is false, and 'deviceCorrelationId' must be set in this request body. + "sendResultsToClient": bool # Optional. Whether or not to allow a + '200 - Success' response body to be sent to the client, which may be + undesirable for security reasons. Default is false, clients will receive a + '204 - NoContent' empty body response. Regardless of selection, calling + Session GetResult will always contain a response body enabling business logic + to be implemented. + }, + "VerifyImage": filetype + } + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. 
+ "sessionId": "str", # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of the rectangle, in pixels. + Required. + "left": 0, # The distance from the left edge if the image to + the left edge of the rectangle, in pixels. Required. + "top": 0, # The distance from the top edge if the image to + the top edge of the rectangle, in pixels. Required. + "width": 0 # The width of the rectangle, in pixels. + Required. + }, + "qualityForRecognition": "str" # Quality of face image for + recognition. Required. Known values are: "low", "medium", and "high". + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.CreateLivenessWithVerifySessionResult] = kwargs.pop("cls", None) + + _body = body.as_dict() if isinstance(body, _model_base.Model) else body + _file_fields: List[str] = ["VerifyImage"] + _data_fields: List[str] = ["Parameters"] + _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) + + _request = build_face_session_create_liveness_with_verify_session_with_verify_image_request( + files=_files, + data=_data, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse 
= self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.CreateLivenessWithVerifySessionResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_liveness_with_verify_session( # pylint: disable=inconsistent-return-statements + self, session_id: str, **kwargs: Any + ) -> None: + """Delete all session related information for matching the specified session id. + + .. + + [!NOTE] + Deleting a session deactivates the Session Auth Token by blocking future API calls made with + that Auth Token. While this can be used to remove any access for that token, those requests + will still count towards overall resource rate limits. It's best to leverage TokenTTL to limit + length of tokens in the case that it is misused. + + :param session_id: The unique ID to reference this session. Required. 
+ :type session_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_session_delete_liveness_with_verify_session_request( + session_id=session_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get_liveness_with_verify_session_result( + self, session_id: str, **kwargs: Any + ) -> _models.LivenessWithVerifySession: + # pylint: disable=line-too-long + """Get session result of detectLivenessWithVerify/singleModal call. + + :param session_id: The unique ID to reference this session. Required. + :type session_id: str + :return: LivenessWithVerifySession. 
The LivenessWithVerifySession is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.LivenessWithVerifySession + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "createdDateTime": "2020-02-20 00:00:00", # DateTime when this session was + created. Required. + "id": "str", # The unique ID to reference this session. Required. + "sessionExpired": bool, # Whether or not the session is expired. Required. + "status": "str", # The current status of the session. Required. Known values + are: "NotStarted", "Started", and "ResultAvailable". + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session should + last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each end-user + device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "result": { + "clientRequestId": "str", # The unique clientRequestId that is sent + by the client in the 'client-request-id' header. Required. + "digest": "str", # The server calculated digest for this request. If + the client reported digest differs from the server calculated digest, then + the message integrity between the client and service has been compromised and + the result should not be trusted. For more information, see how to guides on + how to leverage this value to secure your end-to-end solution. Required. + "id": 0, # The unique id to refer to this audit request. Use this id + with the 'start' query parameter to continue on to the next page of audit + results. Required. + "receivedDateTime": "2020-02-20 00:00:00", # The UTC DateTime that + the request was received. Required. + "request": { + "contentType": "str", # The content type of the request. + Required. 
+ "method": "str", # The HTTP method of the request (i.e., + GET, POST, DELETE). Required. + "url": "str", # The relative URL and query of the liveness + request. Required. + "contentLength": 0, # Optional. The length of the request + body in bytes. + "userAgent": "str" # Optional. The user agent used to submit + the request. + }, + "requestId": "str", # The unique requestId that is returned by the + service to the client in the 'apim-request-id' header. Required. + "response": { + "body": { + "livenessDecision": "str", # Optional. The liveness + classification for the target face. Known values are: "uncertain", + "realface", and "spoofface". + "modelVersionUsed": "str", # Optional. The model + version used for liveness classification. Known values are: + "2020-02-15-preview.01", "2021-11-12-preview.03", + "2022-10-15-preview.04", and "2023-03-02-preview.05". + "target": { + "faceRectangle": { + "height": 0, # The height of the + rectangle, in pixels. Required. + "left": 0, # The distance from the + left edge if the image to the left edge of the rectangle, in + pixels. Required. + "top": 0, # The distance from the + top edge if the image to the top edge of the rectangle, in + pixels. Required. + "width": 0 # The width of the + rectangle, in pixels. Required. + }, + "fileName": "str", # The file name which + contains the face rectangle where the liveness classification was + made on. Required. + "imageType": "str", # The image type which + contains the face rectangle where the liveness classification was + made on. Required. Known values are: "Color", "Infrared", and + "Depth". + "timeOffsetWithinFile": 0 # The time offset + within the file of the frame which contains the face rectangle + where the liveness classification was made on. Required. + }, + "verifyResult": { + "isIdentical": bool, # Whether the target + liveness face and comparison image face match. Required. 
+ "matchConfidence": 0.0, # The target face + liveness face and comparison image face verification confidence. + Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of + the rectangle, in pixels. Required. + "left": 0, # The distance + from the left edge if the image to the left edge of the + rectangle, in pixels. Required. + "top": 0, # The distance + from the top edge if the image to the top edge of the + rectangle, in pixels. Required. + "width": 0 # The width of + the rectangle, in pixels. Required. + }, + "qualityForRecognition": "str" # + Quality of face image for recognition. Required. Known values + are: "low", "medium", and "high". + } + } + }, + "latencyInMilliseconds": 0, # The server measured latency + for this request in milliseconds. Required. + "statusCode": 0 # The HTTP status code returned to the + client. Required. + }, + "sessionId": "str" # The unique sessionId of the created session. It + will expire 48 hours after it was created or may be deleted sooner using the + corresponding session DELETE operation. Required. + }, + "sessionStartDateTime": "2020-02-20 00:00:00" # Optional. DateTime when this + session was started by the client. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LivenessWithVerifySession] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_with_verify_session_result_request( + session_id=session_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LivenessWithVerifySession, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_liveness_with_verify_sessions( + self, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.LivenessSessionItem]: + # pylint: disable=line-too-long + """Lists sessions for /detectLivenessWithVerify/SingleModal. 
+ + List sessions from the last sessionId greater than the "start". + + The result should be ordered by sessionId in ascending order. + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of LivenessSessionItem + :rtype: list[~azure.ai.vision.face.models.LivenessSessionItem] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "createdDateTime": "2020-02-20 00:00:00", # DateTime when this + session was created. Required. + "id": "str", # The unique ID to reference this session. Required. + "sessionExpired": bool, # Whether or not the session is expired. + Required. + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session + should last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each + end-user device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "sessionStartDateTime": "2020-02-20 00:00:00" # Optional. DateTime + when this session was started by the client. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LivenessSessionItem]] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_with_verify_sessions_request( + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LivenessSessionItem], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_liveness_with_verify_session_audit_entries( # pylint: disable=name-too-long + self, session_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.LivenessSessionAuditEntry]: + # pylint: 
disable=line-too-long + """Gets session requests and response body for the session. + + :param session_id: The unique ID to reference this session. Required. + :type session_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of LivenessSessionAuditEntry + :rtype: list[~azure.ai.vision.face.models.LivenessSessionAuditEntry] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "clientRequestId": "str", # The unique clientRequestId that is sent + by the client in the 'client-request-id' header. Required. + "digest": "str", # The server calculated digest for this request. If + the client reported digest differs from the server calculated digest, then + the message integrity between the client and service has been compromised and + the result should not be trusted. For more information, see how to guides on + how to leverage this value to secure your end-to-end solution. Required. + "id": 0, # The unique id to refer to this audit request. Use this id + with the 'start' query parameter to continue on to the next page of audit + results. Required. + "receivedDateTime": "2020-02-20 00:00:00", # The UTC DateTime that + the request was received. Required. + "request": { + "contentType": "str", # The content type of the request. + Required. + "method": "str", # The HTTP method of the request (i.e., + GET, POST, DELETE). Required. + "url": "str", # The relative URL and query of the liveness + request. Required. + "contentLength": 0, # Optional. The length of the request + body in bytes. + "userAgent": "str" # Optional. The user agent used to submit + the request. 
+ }, + "requestId": "str", # The unique requestId that is returned by the + service to the client in the 'apim-request-id' header. Required. + "response": { + "body": { + "livenessDecision": "str", # Optional. The liveness + classification for the target face. Known values are: "uncertain", + "realface", and "spoofface". + "modelVersionUsed": "str", # Optional. The model + version used for liveness classification. Known values are: + "2020-02-15-preview.01", "2021-11-12-preview.03", + "2022-10-15-preview.04", and "2023-03-02-preview.05". + "target": { + "faceRectangle": { + "height": 0, # The height of the + rectangle, in pixels. Required. + "left": 0, # The distance from the + left edge if the image to the left edge of the rectangle, in + pixels. Required. + "top": 0, # The distance from the + top edge if the image to the top edge of the rectangle, in + pixels. Required. + "width": 0 # The width of the + rectangle, in pixels. Required. + }, + "fileName": "str", # The file name which + contains the face rectangle where the liveness classification was + made on. Required. + "imageType": "str", # The image type which + contains the face rectangle where the liveness classification was + made on. Required. Known values are: "Color", "Infrared", and + "Depth". + "timeOffsetWithinFile": 0 # The time offset + within the file of the frame which contains the face rectangle + where the liveness classification was made on. Required. + }, + "verifyResult": { + "isIdentical": bool, # Whether the target + liveness face and comparison image face match. Required. + "matchConfidence": 0.0, # The target face + liveness face and comparison image face verification confidence. + Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of + the rectangle, in pixels. Required. + "left": 0, # The distance + from the left edge if the image to the left edge of the + rectangle, in pixels. Required. 
+ "top": 0, # The distance + from the top edge if the image to the top edge of the + rectangle, in pixels. Required. + "width": 0 # The width of + the rectangle, in pixels. Required. + }, + "qualityForRecognition": "str" # + Quality of face image for recognition. Required. Known values + are: "low", "medium", and "high". + } + } + }, + "latencyInMilliseconds": 0, # The server measured latency + for this request in milliseconds. Required. + "statusCode": 0 # The HTTP status code returned to the + client. Required. + }, + "sessionId": "str" # The unique sessionId of the created session. It + will expire 48 hours after it was created or may be deleted sooner using the + corresponding session DELETE operation. Required. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LivenessSessionAuditEntry]] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_with_verify_session_audit_entries_request( + session_id=session_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and 
close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LivenessSessionAuditEntry], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_operations/_patch.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_patch.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. 
+ +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_serialization.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_serialization.py new file mode 100644 index 000000000000..2f781d740827 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_serialization.py @@ -0,0 +1,1998 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +# pylint: skip-file +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Dict, + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + TypeVar, + MutableMapping, + Type, + List, + Mapping, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +ModelType = TypeVar("ModelType", bound="Model") +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. 
+ """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. + data_as_str = cast(str, data) + + # Remove Byte Order Mark if present in string + data_as_str = data_as_str.lstrip(_BOM) + + if content_type is None: + return data + + if cls.JSON_REGEXP.match(content_type): + try: + return json.loads(data_as_str) + except ValueError as err: + raise DeserializationError("JSON is invalid: {}".format(err), err) + elif "xml" in (content_type or []): + try: + + try: + if isinstance(data, unicode): # type: ignore + # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string + data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore + except NameError: + pass + + return ET.fromstring(data_as_str) # nosec + except ET.ParseError as err: + # It might be because the server has an issue, and returned JSON with + # content-type XML.... + # So let's try a JSON load, and if it's still broken + # let's flow the initial exception + def _json_attemp(data): + try: + return True, json.loads(data) + except ValueError: + return False, None # Don't care about this one + + success, json_result = _json_attemp(data) + if success: + return json_result + # If i'm here, it's not JSON, it's not XML, let's scream + # and raise the last context in this block (the XML exception) + # The function hack is because Py2.7 messes up with exception + # context otherwise. + _LOGGER.critical("Wasn't XML not JSON, failing") + raise DeserializationError("XML is invalid") from err + raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) + + @classmethod + def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: + """Deserialize from HTTP response. + + Use bytes and headers to NOT use any requests/aiohttp or whatever + specific implementation. 
+ Headers will tested for "content-type" + """ + # Try to use content-type from headers if available + content_type = None + if "content-type" in headers: + content_type = headers["content-type"].split(";")[0].strip().lower() + # Ouch, this server did not declare what it sent... + # Let's guess it's JSON... + # Also, since Autorest was considering that an empty body was a valid JSON, + # need that test as well.... + else: + content_type = "application/json" + + if body_bytes: + return cls.deserialize_from_text(body_bytes, content_type) + return None + + +_LOGGER = logging.getLogger(__name__) + +try: + _long_type = long # type: ignore +except NameError: + _long_type = int + + +class UTC(datetime.tzinfo): + """Time Zone info for handling UTC""" + + def utcoffset(self, dt): + """UTF offset for UTC is 0.""" + return datetime.timedelta(0) + + def tzname(self, dt): + """Timestamp representation.""" + return "Z" + + def dst(self, dt): + """No daylight saving for UTC.""" + return datetime.timedelta(hours=1) + + +try: + from datetime import timezone as _FixedOffset # type: ignore +except ImportError: # Python 2.7 + + class _FixedOffset(datetime.tzinfo): # type: ignore + """Fixed offset in minutes east from UTC. + Copy/pasted from Python doc + :param datetime.timedelta offset: offset in timedelta format + """ + + def __init__(self, offset): + self.__offset = offset + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return str(self.__offset.total_seconds() / 3600) + + def __repr__(self): + return "".format(self.tzname(None)) + + def dst(self, dt): + return datetime.timedelta(0) + + def __getinitargs__(self): + return (self.__offset,) + + +try: + from datetime import timezone + + TZ_UTC = timezone.utc +except ImportError: + TZ_UTC = UTC() # type: ignore + +_FLATTEN = re.compile(r"(? 
None: + self.additional_properties: Optional[Dict[str, Any]] = {} + for k in kwargs: + if k not in self._attribute_map: + _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) + elif k in self._validation and self._validation[k].get("readonly", False): + _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) + else: + setattr(self, k, kwargs[k]) + + def __eq__(self, other: Any) -> bool: + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other: Any) -> bool: + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self) -> str: + return str(self.__dict__) + + @classmethod + def enable_additional_properties_sending(cls) -> None: + cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} + + @classmethod + def is_xml_model(cls) -> bool: + try: + cls._xml_map # type: ignore + except AttributeError: + return False + return True + + @classmethod + def _create_xml_node(cls): + """Create XML node.""" + try: + xml_map = cls._xml_map # type: ignore + except AttributeError: + xml_map = {} + + return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) + + def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: + """Return the JSON that would be sent to server from this model. + + This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. + + If you want XML serialization, you can pass the kwargs is_xml=True. 
+ + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, keep_readonly=keep_readonly, **kwargs) # type: ignore + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. + + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs) # type: ignore + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. 
+ client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls: Type[ModelType], + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> ModelType: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param str content_type: JSON by default, set application/xml if XML. 
+ :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. + Remove the polymorphic key from the initial data. + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. 
Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. 
+ + :param str key: A key string from the generated code + """ + return key.replace("\\.", ".") + + +class Serializer(object): + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None): + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize(self, target_obj, data_type=None, **kwargs): + """Serialize data into a string according to type. + + :param target_obj: The data to be serialized. 
+ :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises: SerializationError if serialization fails. + """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() + try: + attributes = target_obj._attribute_map + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. 
Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces. + if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be 
serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + else: + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises: SerializationError if serialization fails. + :raises: ValueError if data is None + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. 
+ :raises: ValueError if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :keyword bool skip_quote: Whether to skip quote the serialized result. + Defaults to False. + :rtype: str, list + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. 
+ :raises: ValueError if data is None + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :param bool required: Whether it's essential that the data not be + empty or None + :raises: AttributeError if required data is None. + :raises: ValueError if data is None + :raises: SerializationError if serialization fails. + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is CoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + elif data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, data.__class__) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." 
+ raise SerializationError(msg.format(data, data_type)) from err + else: + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. + + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param data: Object to be serialized. + :param str data_type: Type of object in the iterable. + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + return eval(data_type)(data) # nosec + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param data: Object to be serialized. + :rtype: str + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): # type: ignore + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + else: + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list attr: Object to be serialized. 
+ :param str iter_type: Type of object in the iterable. + :param bool required: Whether the objects in the iterable must + not be None or empty. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + :keyword bool do_quote: Whether to quote the serialized result of each iterable element. + Defaults to False. + :rtype: list, str + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def 
serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :param bool required: Whether the objects in the dictionary must + not be None or empty. + :rtype: dict + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
+ :rtype: dict or str + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + elif obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) + + @staticmethod + def serialize_bytearray(attr, **kwargs): + """Serialize bytearray into base-64 string. + + :param attr: Object to be serialized. 
+ :rtype: str + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): + """Serialize str into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): + """Serialize Decimal object to float. + + :param attr: Object to be serialized. + :rtype: float + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): + """Serialize long (Py2) or int (Py3). + + :param attr: Object to be serialized. + :rtype: int/long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): + """Serialize TimeDelta object into ISO-8601 formatted string. + + :param TimeDelta attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: TypeError if format invalid. 
+ """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError: + raise TypeError("RFC1123 object must be valid Datetime object.") + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: SerializationError if format invalid. + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise SerializationError(msg) from err + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise TypeError(msg) from err + + @staticmethod + def serialize_unix(attr, **kwargs): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. 
+ :rtype: int + :raises: SerializationError if format invalid + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError: + raise TypeError("Unix time object must be valid Datetime object.") + + +def rest_key_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(List[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." 
in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key.""" + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. 
+ + :param dict internal_type: An model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + return xml_name + + +def xml_key_extractor(attr, attr_desc, data): + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for a children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + + # If it's an attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if internal_type: # Complex type, ignore itemsName and use the complex type name + items_name = 
_extract_name_from_internal_type(internal_type) + else: + items_name = xml_desc.get("itemsName", xml_name) + children = data.findall(items_name) + + if len(children) == 0: + if is_iter_type: + if is_wrapped: + return None # is_wrapped no node, we want None + else: + return [] # not wrapped, assume empty list + return None # Assume it's not there, maybe an optional node. + + # If is_iter_type and not wrapped, return all found children + if is_iter_type: + if not is_wrapped: + return children + else: # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( + xml_name + ) + ) + return list(children[0]) # Might be empty list and that's ok. + + # Here it's not a itertype, we should have found one element only or empty + if len(children) > 1: + raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) + return children[0] + + +class Deserializer(object): + """Response object model deserializer. + + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. 
+ """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]] = None): + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. 
+ """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + elif isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None: + return data + try: + attributes = response._attribute_map # type: ignore + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... 
+ if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + else: + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. 
If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + """ + try: + return self(target_obj, data, content_type=content_type) + except: + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. 
+ + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param raw_data: Data to be processed. + :param content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param response: The response model class. + :param d_attrs: The deserialized response attributes. 
+ """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [k for k, v in response._validation.items() if v.get("readonly")] + const = [k for k, v in response._validation.items() if v.get("constant")] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) + + def deserialize_data(self, data, data_type): + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. 
+ """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + else: + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. 
+ :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :rtype: dict + :raises: TypeError if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, str): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + else: + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. + + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :rtype: str, int, float or bool + :raises: TypeError if string format is not valid. + """ + # If we're here, data is supposed to be a basic type. 
+ # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + else: + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + elif isinstance(attr, str): + if attr.lower() in ["true", "1"]: + return True + elif attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + else: + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. 
+ try: + return list(enum_obj.__members__.values())[data] + except IndexError: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :rtype: bytearray + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :rtype: bytearray + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :rtype: Decimal + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(str(attr)) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise DeserializationError(msg) from err + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. 
+ :rtype: long or int + :raises: ValueError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :rtype: TimeDelta + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise DeserializationError(msg) from err + else: + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :rtype: Date + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + return isodate.parse_date(attr, defaultmonth=0, defaultday=0) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. 
+ :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) # type: ignore + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise DeserializationError(msg) from err + else: + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + attr = attr.upper() # type: ignore + match = Deserializer.valid_date.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize datetime object." + raise DeserializationError(msg) from err + else: + return date_obj + + @staticmethod + def deserialize_unix(attr): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param int attr: Object to be serialized. 
+ :rtype: Datetime + :raises: DeserializationError if format invalid + """ + if isinstance(attr, ET.Element): + attr = int(attr.text) # type: ignore + try: + attr = int(attr) + date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to unix datetime object." + raise DeserializationError(msg) from err + else: + return date_obj diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_vendor.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_vendor.py new file mode 100644 index 000000000000..3177cb12a490 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_vendor.py @@ -0,0 +1,91 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +import json +from typing import Any, Dict, IO, List, Mapping, Optional, Sequence, TYPE_CHECKING, Tuple, Union + +from ._configuration import ( + FaceAdministrationClientConfiguration, + FaceClientConfiguration, + FaceSessionClientConfiguration, +) +from ._model_base import Model, SdkJSONEncoder + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core import PipelineClient + + from ._serialization import Deserializer, Serializer + + +class FaceClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "PipelineClient" + _config: FaceClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +class FaceAdministrationClientMixinABC(ABC): + """DO NOT use this class. 
It is for internal typing use only.""" + + _client: "PipelineClient" + _config: FaceAdministrationClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +class FaceSessionClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "PipelineClient" + _config: FaceSessionClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +# file-like tuple could be `(filename, IO (or bytes))` or `(filename, IO (or bytes), content_type)` +FileContent = Union[str, bytes, IO[str], IO[bytes]] + +FileType = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], +] + +FilesType = Union[Mapping[str, FileType], Sequence[Tuple[str, FileType]]] + + +def serialize_multipart_data_entry(data_entry: Any) -> Any: + if isinstance(data_entry, (list, tuple, dict, Model)): + return json.dumps(data_entry, cls=SdkJSONEncoder, exclude_readonly=True) + return data_entry + + +def prepare_multipart_form_data( + body: Mapping[str, Any], multipart_fields: List[str], data_fields: List[str] +) -> Tuple[List[FileType], Dict[str, Any]]: + files: List[FileType] = [] + data: Dict[str, Any] = {} + for multipart_field in multipart_fields: + multipart_entry = body.get(multipart_field) + if isinstance(multipart_entry, list): + files.extend([(multipart_field, e) for e in multipart_entry]) + elif multipart_entry: + files.append((multipart_field, multipart_entry)) + + for data_field in data_fields: + data_entry = body.get(data_field) + if data_entry: + data[data_field] = serialize_multipart_data_entry(data_entry) + + return files, data diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_version.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_version.py new file mode 100644 index 000000000000..be71c81bd282 --- /dev/null +++ 
b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/__init__.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/__init__.py new file mode 100644 index 000000000000..696348801174 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/__init__.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._client import FaceClient +from ._client import FaceAdministrationClient +from ._client import FaceSessionClient + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "FaceClient", + "FaceAdministrationClient", + "FaceSessionClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_client.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_client.py new file mode 100644 index 000000000000..33890cfdd37d --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_client.py @@ -0,0 +1,284 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, TYPE_CHECKING, Union + +from azure.core import AsyncPipelineClient +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies +from azure.core.rest import AsyncHttpResponse, HttpRequest + +from .._serialization import Deserializer, Serializer +from ._configuration import ( + FaceAdministrationClientConfiguration, + FaceClientConfiguration, + FaceSessionClientConfiguration, +) +from ._operations import ( + FaceAdministrationClientOperationsMixin, + FaceClientOperationsMixin, + FaceSessionClientOperationsMixin, +) + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class FaceClient(FaceClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """FaceClient. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + _endpoint = "{endpoint}/face/{apiVersion}" + self._config = FaceClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "FaceClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) + + +class FaceAdministrationClient( + FaceAdministrationClientOperationsMixin +): # pylint: disable=client-accepts-api-version-keyword + """FaceAdministrationClient. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + _endpoint = "{endpoint}/face/{apiVersion}" + self._config = FaceAdministrationClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "FaceAdministrationClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) + + +class FaceSessionClient(FaceSessionClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """FaceSessionClient. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + _endpoint = "{endpoint}/face/{apiVersion}" + self._config = FaceSessionClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "FaceSessionClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_configuration.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_configuration.py new file mode 100644 index 000000000000..997338152abc --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_configuration.py @@ -0,0 +1,189 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING, Union + +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies + +from .._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class FaceClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for FaceClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "v1.1-preview.1") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-vision-face/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Ocp-Apim-Subscription-Key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or 
policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) + + +class FaceAdministrationClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for FaceAdministrationClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "v1.1-preview.1") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-vision-face/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Ocp-Apim-Subscription-Key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or 
policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) + + +class FaceSessionClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for FaceSessionClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: + https://{resource-name}.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: API Version. Default value is "v1.1-preview.1". Note that overriding this + default value may result in unsupported behavior. 
+ :paramtype api_version: str or ~azure.ai.vision.face.models.Versions + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "v1.1-preview.1") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-vision-face/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Ocp-Apim-Subscription-Key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or 
policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/__init__.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/__init__.py new file mode 100644 index 000000000000..9642d0953f19 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operations import FaceClientOperationsMixin +from ._operations import FaceAdministrationClientOperationsMixin +from ._operations import FaceSessionClientOperationsMixin + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "FaceClientOperationsMixin", + "FaceAdministrationClientOperationsMixin", + "FaceSessionClientOperationsMixin", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/_operations.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/_operations.py new file mode 100644 index 000000000000..2cd629abda47 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/_operations.py @@ -0,0 +1,18070 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 
+# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, Callable, Dict, IO, List, Optional, Type, TypeVar, Union, cast, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.polling.async_base_polling import AsyncLROBasePolling +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... 
import _model_base, models as _models +from ..._model_base import SdkJSONEncoder, _deserialize +from ..._operations._operations import ( + build_face_administration_add_face_list_face_from_url_request, + build_face_administration_add_face_list_face_request, + build_face_administration_add_large_face_list_face_from_url_request, + build_face_administration_add_large_face_list_face_request, + build_face_administration_add_large_person_group_person_face_from_url_request, + build_face_administration_add_large_person_group_person_face_request, + build_face_administration_add_person_face_from_url_request, + build_face_administration_add_person_face_request, + build_face_administration_add_person_group_person_face_from_url_request, + build_face_administration_add_person_group_person_face_request, + build_face_administration_create_dynamic_person_group_request, + build_face_administration_create_dynamic_person_group_with_person_request, + build_face_administration_create_face_list_request, + build_face_administration_create_large_face_list_request, + build_face_administration_create_large_person_group_person_request, + build_face_administration_create_large_person_group_request, + build_face_administration_create_person_group_person_request, + build_face_administration_create_person_group_request, + build_face_administration_create_person_request, + build_face_administration_delete_dynamic_person_group_request, + build_face_administration_delete_face_list_face_request, + build_face_administration_delete_face_list_request, + build_face_administration_delete_large_face_list_face_request, + build_face_administration_delete_large_face_list_request, + build_face_administration_delete_large_person_group_person_face_request, + build_face_administration_delete_large_person_group_person_request, + build_face_administration_delete_large_person_group_request, + build_face_administration_delete_person_face_request, + build_face_administration_delete_person_group_person_face_request, + 
build_face_administration_delete_person_group_person_request, + build_face_administration_delete_person_group_request, + build_face_administration_delete_person_request, + build_face_administration_get_dynamic_person_group_persons_request, + build_face_administration_get_dynamic_person_group_references_request, + build_face_administration_get_dynamic_person_group_request, + build_face_administration_get_dynamic_person_groups_request, + build_face_administration_get_face_list_request, + build_face_administration_get_face_lists_request, + build_face_administration_get_large_face_list_face_request, + build_face_administration_get_large_face_list_faces_request, + build_face_administration_get_large_face_list_request, + build_face_administration_get_large_face_list_training_status_request, + build_face_administration_get_large_face_lists_request, + build_face_administration_get_large_person_group_person_face_request, + build_face_administration_get_large_person_group_person_request, + build_face_administration_get_large_person_group_persons_request, + build_face_administration_get_large_person_group_request, + build_face_administration_get_large_person_group_training_status_request, + build_face_administration_get_large_person_groups_request, + build_face_administration_get_person_face_request, + build_face_administration_get_person_faces_request, + build_face_administration_get_person_group_person_face_request, + build_face_administration_get_person_group_person_request, + build_face_administration_get_person_group_persons_request, + build_face_administration_get_person_group_request, + build_face_administration_get_person_group_training_status_request, + build_face_administration_get_person_groups_request, + build_face_administration_get_person_request, + build_face_administration_get_persons_request, + build_face_administration_train_large_face_list_request, + build_face_administration_train_large_person_group_request, + 
build_face_administration_train_person_group_request, + build_face_administration_update_dynamic_person_group_request, + build_face_administration_update_dynamic_person_group_with_person_changes_request, + build_face_administration_update_face_list_request, + build_face_administration_update_large_face_list_face_request, + build_face_administration_update_large_face_list_request, + build_face_administration_update_large_person_group_person_face_request, + build_face_administration_update_large_person_group_person_request, + build_face_administration_update_large_person_group_request, + build_face_administration_update_person_face_request, + build_face_administration_update_person_group_person_face_request, + build_face_administration_update_person_group_person_request, + build_face_administration_update_person_group_request, + build_face_administration_update_person_request, + build_face_detect_from_url_request, + build_face_detect_request, + build_face_find_similar_from_face_list_request, + build_face_find_similar_from_large_face_list_request, + build_face_find_similar_request, + build_face_group_request, + build_face_identify_from_dynamic_person_group_request, + build_face_identify_from_large_person_group_request, + build_face_identify_from_person_directory_request, + build_face_identify_from_person_group_request, + build_face_session_create_liveness_session_request, + build_face_session_create_liveness_with_verify_session_request, + build_face_session_create_liveness_with_verify_session_with_verify_image_request, + build_face_session_delete_liveness_session_request, + build_face_session_delete_liveness_with_verify_session_request, + build_face_session_get_liveness_session_audit_entries_request, + build_face_session_get_liveness_session_result_request, + build_face_session_get_liveness_sessions_request, + build_face_session_get_liveness_with_verify_session_audit_entries_request, + build_face_session_get_liveness_with_verify_session_result_request, + 
build_face_session_get_liveness_with_verify_sessions_request, + build_face_verify_face_to_face_request, + build_face_verify_from_large_person_group_request, + build_face_verify_from_person_directory_request, + build_face_verify_from_person_group_request, +) +from ..._vendor import prepare_multipart_form_data +from .._vendor import FaceAdministrationClientMixinABC, FaceClientMixinABC, FaceSessionClientMixinABC + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class FaceClientOperationsMixin(FaceClientMixinABC): + + @overload + async def _detect_from_url( + self, + body: JSON, + *, + content_type: str = "application/json", + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + return_face_id: Optional[bool] = None, + return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None, + return_face_landmarks: Optional[bool] = None, + return_recognition_model: Optional[bool] = None, + face_id_time_to_live: Optional[int] = None, + **kwargs: Any + ) -> List[_models.FaceDetectionResult]: ... 
+ @overload + async def _detect_from_url( + self, + *, + url: str, + content_type: str = "application/json", + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + return_face_id: Optional[bool] = None, + return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None, + return_face_landmarks: Optional[bool] = None, + return_recognition_model: Optional[bool] = None, + face_id_time_to_live: Optional[int] = None, + **kwargs: Any + ) -> List[_models.FaceDetectionResult]: ... + @overload + async def _detect_from_url( + self, + body: IO[bytes], + *, + content_type: str = "application/json", + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + return_face_id: Optional[bool] = None, + return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None, + return_face_landmarks: Optional[bool] = None, + return_recognition_model: Optional[bool] = None, + face_id_time_to_live: Optional[int] = None, + **kwargs: Any + ) -> List[_models.FaceDetectionResult]: ... + + @distributed_trace_async + async def _detect_from_url( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + return_face_id: Optional[bool] = None, + return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None, + return_face_landmarks: Optional[bool] = None, + return_recognition_model: Optional[bool] = None, + face_id_time_to_live: Optional[int] = None, + **kwargs: Any + ) -> List[_models.FaceDetectionResult]: + # pylint: disable=line-too-long + """Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, + and attributes. + + .. 
+ + [!IMPORTANT] + To mitigate potential misuse that can subject people to stereotyping, discrimination, or + unfair denial of services, we are retiring Face API attributes that predict emotion, gender, + age, smile, facial hair, hair, and makeup. Read more about this decision + https://azure.microsoft.com/en-us/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/. + + + * + + + * No image will be stored. Only the extracted face feature(s) will be stored on server. The + faceId is an identifier of the face feature and will be used in "Identify", "Verify", and "Find + Similar". The stored face features will expire and be deleted at the time specified by + faceIdTimeToLive after the original detection call. + * Optional parameters include faceId, landmarks, and attributes. Attributes include headPose, + glasses, occlusion, accessories, blur, exposure, noise, mask, and qualityForRecognition. Some + of the results returned for specific attributes may not be highly accurate. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from + large to small. + * For optimal results when querying "Identify", "Verify", and "Find Similar" ('returnFaceId' is + true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels + (100 pixels between eyes). + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + * 'detection_02': Face attributes and landmarks are disabled if you choose this detection + model. 
+ * 'detection_03': Face attributes (mask and headPose only) and landmarks are supported if you + choose this detection model. + + * Different 'recognitionModel' values are provided. If follow-up operations like "Verify", + "Identify", "Find Similar" are needed, please specify the recognition model with + 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if + latest model needed, please explicitly specify the model you need in this parameter. Once + specified, the detected faceIds will be associated with the specified recognition model. More + details, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-recognition-model. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword recognition_model: The 'recognitionModel' associated with the detected faceIds. + Supported 'recognitionModel' values include 'recognition_01', 'recognition_02', + 'recognition_03' or 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' + is recommended since its accuracy is improved on faces wearing masks compared with + 'recognition_03', and its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", "recognition_03", and + "recognition_04". Default value is None. 
+ :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :keyword return_face_id: Return faceIds of the detected faces or not. The default value is + true. Default value is None. + :paramtype return_face_id: bool + :keyword return_face_attributes: Analyze and return the one or more specified face attributes + in the comma-separated string like 'returnFaceAttributes=headPose,glasses'. Face attribute + analysis has additional computational and time cost. Default value is None. + :paramtype return_face_attributes: list[str or ~azure.ai.vision.face.models.FaceAttributeType] + :keyword return_face_landmarks: Return face landmarks of the detected faces or not. The default + value is false. Default value is None. + :paramtype return_face_landmarks: bool + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. This is only applicable when returnFaceId = true. Default value is None. + :paramtype return_recognition_model: bool + :keyword face_id_time_to_live: The number of seconds for the face ID being cached. Supported + range from 60 seconds up to 86400 seconds. The default value is 86400 (24 hours). Default value + is None. + :paramtype face_id_time_to_live: int + :return: list of FaceDetectionResult + :rtype: list[~azure.ai.vision.face.models.FaceDetectionResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == [ + { + "faceRectangle": { + "height": 0, # The height of the rectangle, in pixels. + Required. + "left": 0, # The distance from the left edge if the image to + the left edge of the rectangle, in pixels. Required. + "top": 0, # The distance from the top edge if the image to + the top edge of the rectangle, in pixels. Required. 
+ "width": 0 # The width of the rectangle, in pixels. + Required. + }, + "faceAttributes": { + "accessories": [ + { + "confidence": 0.0, # Confidence level of the + accessory type. Range between [0,1]. Required. + "type": "str" # Type of the accessory. + Required. Known values are: "headwear", "glasses", and "mask". + } + ], + "age": 0.0, # Optional. Age in years. + "blur": { + "blurLevel": "str", # An enum value indicating level + of blurriness. Required. Known values are: "low", "medium", and + "high". + "value": 0.0 # A number indicating level of + blurriness ranging from 0 to 1. Required. + }, + "exposure": { + "exposureLevel": "str", # An enum value indicating + level of exposure. Required. Known values are: "underExposure", + "goodExposure", and "overExposure". + "value": 0.0 # A number indicating level of exposure + level ranging from 0 to 1. [0, 0.25) is under exposure. [0.25, 0.75) + is good exposure. [0.75, 1] is over exposure. Required. + }, + "facialHair": { + "beard": 0.0, # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + "moustache": 0.0, # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + "sideburns": 0.0 # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + }, + "glasses": "str", # Optional. Glasses type if any of the + face. Known values are: "noGlasses", "readingGlasses", "sunglasses", and + "swimmingGoggles". + "hair": { + "bald": 0.0, # A number describing confidence level + of whether the person is bald. Required. + "hairColor": [ + { + "color": "str", # Name of the hair + color. Required. Known values are: "unknown", "white", + "gray", "blond", "brown", "red", "black", and "other". + "confidence": 0.0 # Confidence level + of the color. Range between [0,1]. Required. + } + ], + "invisible": bool # A boolean value describing + whether the hair is visible in the image. Required. 
+ }, + "headPose": { + "pitch": 0.0, # Value of angles. Required. + "roll": 0.0, # Value of angles. Required. + "yaw": 0.0 # Value of angles. Required. + }, + "mask": { + "noseAndMouthCovered": bool, # A boolean value + indicating whether nose and mouth are covered. Required. + "type": "str" # Type of the mask. Required. Known + values are: "faceMask", "noMask", "otherMaskOrOcclusion", and + "uncertain". + }, + "noise": { + "noiseLevel": "str", # An enum value indicating + level of noise. Required. Known values are: "low", "medium", and + "high". + "value": 0.0 # A number indicating level of noise + level ranging from 0 to 1. [0, 0.25) is under exposure. [0.25, 0.75) + is good exposure. [0.75, 1] is over exposure. [0, 0.3) is low noise + level. [0.3, 0.7) is medium noise level. [0.7, 1] is high noise + level. Required. + }, + "occlusion": { + "eyeOccluded": bool, # A boolean value indicating + whether eyes are occluded. Required. + "foreheadOccluded": bool, # A boolean value + indicating whether forehead is occluded. Required. + "mouthOccluded": bool # A boolean value indicating + whether the mouth is occluded. Required. + }, + "qualityForRecognition": "str", # Optional. Properties + describing the overall image quality regarding whether the image being + used in the detection is of sufficient quality to attempt face + recognition on. Known values are: "low", "medium", and "high". + "smile": 0.0 # Optional. Smile intensity, a number between + [0,1]. + }, + "faceId": "str", # Optional. Unique faceId of the detected face, + created by detection API and it will expire 24 hours after the detection + call. To return this, it requires 'returnFaceId' parameter to be true. + "faceLandmarks": { + "eyeLeftBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. 
+ Required. + }, + "eyeLeftOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowLeftInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowLeftOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowRightInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowRightOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "mouthLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "mouthRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseLeftAlarOutTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. 
+ }, + "noseLeftAlarTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRightAlarOutTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRightAlarTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRootLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRootRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "pupilLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "pupilRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "underLipBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "underLipTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "upperLipBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "upperLipTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + } + }, + "recognitionModel": "str" # Optional. The 'recognitionModel' + associated with this faceId. This is only returned when + 'returnRecognitionModel' is explicitly set as true. 
Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + cls: ClsType[List[_models.FaceDetectionResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_detect_from_url_request( + detection_model=detection_model, + recognition_model=recognition_model, + return_face_id=return_face_id, + return_face_attributes=return_face_attributes, + return_face_landmarks=return_face_landmarks, + return_recognition_model=return_recognition_model, + face_id_time_to_live=face_id_time_to_live, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, 
**kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceDetectionResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def _detect( + self, + image_content: bytes, + *, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + return_face_id: Optional[bool] = None, + return_face_attributes: Optional[List[Union[str, _models.FaceAttributeType]]] = None, + return_face_landmarks: Optional[bool] = None, + return_recognition_model: Optional[bool] = None, + face_id_time_to_live: Optional[int] = None, + **kwargs: Any + ) -> List[_models.FaceDetectionResult]: + # pylint: disable=line-too-long + """Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, + and attributes. + + .. + + [!IMPORTANT] + To mitigate potential misuse that can subject people to stereotyping, discrimination, or + unfair denial of services, we are retiring Face API attributes that predict emotion, gender, + age, smile, facial hair, hair, and makeup. Read more about this decision + https://azure.microsoft.com/en-us/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/. + + + * + + + * No image will be stored. Only the extracted face feature(s) will be stored on server. The + faceId is an identifier of the face feature and will be used in "Identify", "Verify", and "Find + Similar". 
The stored face features will expire and be deleted at the time specified by + faceIdTimeToLive after the original detection call. + * Optional parameters include faceId, landmarks, and attributes. Attributes include headPose, + glasses, occlusion, accessories, blur, exposure, noise, mask, and qualityForRecognition. Some + of the results returned for specific attributes may not be highly accurate. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from + large to small. + * For optimal results when querying "Identify", "Verify", and "Find Similar" ('returnFaceId' is + true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels + (100 pixels between eyes). + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + * 'detection_02': Face attributes and landmarks are disabled if you choose this detection + model. + * 'detection_03': Face attributes (mask and headPose only) and landmarks are supported if you + choose this detection model. + + * Different 'recognitionModel' values are provided. If follow-up operations like "Verify", + "Identify", "Find Similar" are needed, please specify the recognition model with + 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if + latest model needed, please explicitly specify the model you need in this parameter. Once + specified, the detected faceIds will be associated with the specified recognition model. 
More + details, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-recognition-model. + + :param image_content: The input image binary. Required. + :type image_content: bytes + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword recognition_model: The 'recognitionModel' associated with the detected faceIds. + Supported 'recognitionModel' values include 'recognition_01', 'recognition_02', + 'recognition_03' or 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' + is recommended since its accuracy is improved on faces wearing masks compared with + 'recognition_03', and its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", "recognition_03", and + "recognition_04". Default value is None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :keyword return_face_id: Return faceIds of the detected faces or not. The default value is + true. Default value is None. + :paramtype return_face_id: bool + :keyword return_face_attributes: Analyze and return the one or more specified face attributes + in the comma-separated string like 'returnFaceAttributes=headPose,glasses'. Face attribute + analysis has additional computational and time cost. Default value is None. + :paramtype return_face_attributes: list[str or ~azure.ai.vision.face.models.FaceAttributeType] + :keyword return_face_landmarks: Return face landmarks of the detected faces or not. The default + value is false. Default value is None. 
+ :paramtype return_face_landmarks: bool + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. This is only applicable when returnFaceId = true. Default value is None. + :paramtype return_recognition_model: bool + :keyword face_id_time_to_live: The number of seconds for the face ID being cached. Supported + range from 60 seconds up to 86400 seconds. The default value is 86400 (24 hours). Default value + is None. + :paramtype face_id_time_to_live: int + :return: list of FaceDetectionResult + :rtype: list[~azure.ai.vision.face.models.FaceDetectionResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "faceRectangle": { + "height": 0, # The height of the rectangle, in pixels. + Required. + "left": 0, # The distance from the left edge if the image to + the left edge of the rectangle, in pixels. Required. + "top": 0, # The distance from the top edge if the image to + the top edge of the rectangle, in pixels. Required. + "width": 0 # The width of the rectangle, in pixels. + Required. + }, + "faceAttributes": { + "accessories": [ + { + "confidence": 0.0, # Confidence level of the + accessory type. Range between [0,1]. Required. + "type": "str" # Type of the accessory. + Required. Known values are: "headwear", "glasses", and "mask". + } + ], + "age": 0.0, # Optional. Age in years. + "blur": { + "blurLevel": "str", # An enum value indicating level + of blurriness. Required. Known values are: "low", "medium", and + "high". + "value": 0.0 # A number indicating level of + blurriness ranging from 0 to 1. Required. + }, + "exposure": { + "exposureLevel": "str", # An enum value indicating + level of exposure. Required. Known values are: "underExposure", + "goodExposure", and "overExposure". + "value": 0.0 # A number indicating level of exposure + level ranging from 0 to 1. [0, 0.25) is under exposure. 
[0.25, 0.75) + is good exposure. [0.75, 1] is over exposure. Required. + }, + "facialHair": { + "beard": 0.0, # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + "moustache": 0.0, # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + "sideburns": 0.0 # A number ranging from 0 to 1 + indicating a level of confidence associated with a property. + Required. + }, + "glasses": "str", # Optional. Glasses type if any of the + face. Known values are: "noGlasses", "readingGlasses", "sunglasses", and + "swimmingGoggles". + "hair": { + "bald": 0.0, # A number describing confidence level + of whether the person is bald. Required. + "hairColor": [ + { + "color": "str", # Name of the hair + color. Required. Known values are: "unknown", "white", + "gray", "blond", "brown", "red", "black", and "other". + "confidence": 0.0 # Confidence level + of the color. Range between [0,1]. Required. + } + ], + "invisible": bool # A boolean value describing + whether the hair is visible in the image. Required. + }, + "headPose": { + "pitch": 0.0, # Value of angles. Required. + "roll": 0.0, # Value of angles. Required. + "yaw": 0.0 # Value of angles. Required. + }, + "mask": { + "noseAndMouthCovered": bool, # A boolean value + indicating whether nose and mouth are covered. Required. + "type": "str" # Type of the mask. Required. Known + values are: "faceMask", "noMask", "otherMaskOrOcclusion", and + "uncertain". + }, + "noise": { + "noiseLevel": "str", # An enum value indicating + level of noise. Required. Known values are: "low", "medium", and + "high". + "value": 0.0 # A number indicating level of noise + level ranging from 0 to 1. [0, 0.25) is under exposure. [0.25, 0.75) + is good exposure. [0.75, 1] is over exposure. [0, 0.3) is low noise + level. [0.3, 0.7) is medium noise level. [0.7, 1] is high noise + level. Required. 
+ }, + "occlusion": { + "eyeOccluded": bool, # A boolean value indicating + whether eyes are occluded. Required. + "foreheadOccluded": bool, # A boolean value + indicating whether forehead is occluded. Required. + "mouthOccluded": bool # A boolean value indicating + whether the mouth is occluded. Required. + }, + "qualityForRecognition": "str", # Optional. Properties + describing the overall image quality regarding whether the image being + used in the detection is of sufficient quality to attempt face + recognition on. Known values are: "low", "medium", and "high". + "smile": 0.0 # Optional. Smile intensity, a number between + [0,1]. + }, + "faceId": "str", # Optional. Unique faceId of the detected face, + created by detection API and it will expire 24 hours after the detection + call. To return this, it requires 'returnFaceId' parameter to be true. + "faceLandmarks": { + "eyeLeftBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeLeftTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyeRightTop": { + "x": 0.0, # The horizontal component, in pixels. 
+ Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowLeftInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowLeftOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowRightInner": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "eyebrowRightOuter": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "mouthLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "mouthRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseLeftAlarOutTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseLeftAlarTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRightAlarOutTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRightAlarTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRootLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseRootRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "noseTip": { + "x": 0.0, # The horizontal component, in pixels. + Required. 
+ "y": 0.0 # The vertical component, in pixels. + Required. + }, + "pupilLeft": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "pupilRight": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "underLipBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "underLipTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "upperLipBottom": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + }, + "upperLipTop": { + "x": 0.0, # The horizontal component, in pixels. + Required. + "y": 0.0 # The vertical component, in pixels. + Required. + } + }, + "recognitionModel": "str" # Optional. The 'recognitionModel' + associated with this faceId. This is only returned when + 'returnRecognitionModel' is explicitly set as true. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[List[_models.FaceDetectionResult]] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_detect_request( + detection_model=detection_model, + recognition_model=recognition_model, + return_face_id=return_face_id, + return_face_attributes=return_face_attributes, + return_face_landmarks=return_face_landmarks, + return_recognition_model=return_recognition_model, + face_id_time_to_live=face_id_time_to_live, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + 
deserialized = _deserialize(List[_models.FaceDetectionResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def find_similar( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a faceId array. A faceId + array contains the faces created by Detect. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target faceId array. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. 
Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "faceIds": [ + "str" # An array of candidate faceIds. All of them are created by + "Detect" and the faceIds will expire 24 hours after the detection call. The + number of faceIds is limited to 1000. Required. + ], + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @overload + async def find_similar( + self, + *, + face_id: str, + face_ids: List[str], + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a faceId array. A faceId + array contains the faces created by Detect. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". 
"matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target faceId array. + + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword face_ids: An array of candidate faceIds. All of them are created by "Detect" and the + faceIds will expire 24 hours after the detection call. The number of faceIds is limited to + 1000. Required. + :paramtype face_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. + :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @overload + async def find_similar( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a faceId array. A faceId + array contains the faces created by Detect. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target faceId array. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @distributed_trace_async + async def find_similar( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_id: str = _Unset, + face_ids: List[str] = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a faceId array. A faceId + array contains the faces created by Detect. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. 
+ + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target faceId array. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword face_ids: An array of candidate faceIds. All of them are created by "Detect" and the + faceIds will expire 24 hours after the detection call. The number of faceIds is limited to + 1000. Required. + :paramtype face_ids: list[str] + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. + :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "faceIds": [ + "str" # An array of candidate faceIds. All of them are created by + "Detect" and the faceIds will expire 24 hours after the detection call. The + number of faceIds is limited to 1000. Required. + ], + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. 
The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceFindSimilarResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + body = { + "faceid": face_id, + "faceids": face_ids, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + "mode": mode, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_find_similar_request( + 
content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceFindSimilarResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def find_similar_from_face_list( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Face List. A 'faceListId' + is created by Create Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. 
Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Face List. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "faceListId": "str", # An existing user-specified unique candidate Face + List, created in "Create Face List". Face List contains a set of persistedFaceIds + which are persisted and will never expire. Required. + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. 
faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @overload + async def find_similar_from_face_list( + self, + *, + face_id: str, + face_list_id: str, + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Face List. A 'faceListId' + is created by Create Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Face List. + + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword face_list_id: An existing user-specified unique candidate Face List, created in + "Create Face List". 
Face List contains a set of persistedFaceIds which are persisted and will + never expire. Required. + :paramtype face_list_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. + :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @overload + async def find_similar_from_face_list( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Face List. A 'faceListId' + is created by Create Face List. 
+ + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Face List. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. 
+ } + ] + """ + + @distributed_trace_async + async def find_similar_from_face_list( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_id: str = _Unset, + face_list_id: str = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Face List. A 'faceListId' + is created by Create Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Face List. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword face_list_id: An existing user-specified unique candidate Face List, created in + "Create Face List". Face List contains a set of persistedFaceIds which are persisted and will + never expire. Required. 
+ :paramtype face_list_id: str + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. + :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "faceListId": "str", # An existing user-specified unique candidate Face + List, created in "Create Face List". Face List contains a set of persistedFaceIds + which are persisted and will never expire. Required. + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. 
persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceFindSimilarResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if face_list_id is _Unset: + raise TypeError("missing required argument: face_list_id") + body = { + "faceid": face_id, + "facelistid": face_list_id, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + "mode": mode, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_find_similar_from_face_list_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, 
**kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceFindSimilarResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def find_similar_from_large_face_list( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Large Face List. A + 'largeFaceListId' is created by Create Large Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Large Face List. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "largeFaceListId": "str", # An existing user-specified unique candidate + Large Face List, created in "Create Large Face List". Large Face List contains a + set of persistedFaceIds which are persisted and will never expire. Required. + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. 
+ } + ] + """ + + @overload + async def find_similar_from_large_face_list( + self, + *, + face_id: str, + large_face_list_id: str, + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Large Face List. A + 'largeFaceListId' is created by Create Large Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Large Face List. + + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword large_face_list_id: An existing user-specified unique candidate Large Face List, + created in "Create Large Face List". Large Face List contains a set of persistedFaceIds which + are persisted and will never expire. Required. + :paramtype large_face_list_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. + :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @overload + async def find_similar_from_large_face_list( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Large Face List. A + 'largeFaceListId' is created by Create Large Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. 
It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Large Face List. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. + } + ] + """ + + @distributed_trace_async + async def find_similar_from_large_face_list( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_id: str = _Unset, + large_face_list_id: str = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + mode: Optional[Union[str, _models.FindSimilarMatchMode]] = None, + **kwargs: Any + ) -> List[_models.FaceFindSimilarResult]: + # pylint: disable=line-too-long + """Given query face's faceId, to search the similar-looking faces from a Large Face List. 
A + 'largeFaceListId' is created by Create Large Face List. + + Depending on the input the returned similar faces list contains faceIds or persistedFaceIds + ranked by similarity. + + Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default + mode that it tries to find faces of the same person as possible by using internal same-person + thresholds. It is useful to find a known person's other photos. Note that an empty list will be + returned if no faces pass the internal thresholds. "matchFace" mode ignores same-person + thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used + in the cases like searching celebrity-looking faces. + + The 'recognitionModel' associated with the query faceId should be the same as the + 'recognitionModel' used by the target Large Face List. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id: faceId of the query face. User needs to call "Detect" first to get a valid + faceId. Note that this faceId is not persisted and will expire 24 hours after the detection + call. Required. + :paramtype face_id: str + :keyword large_face_list_id: An existing user-specified unique candidate Large Face List, + created in "Create Large Face List". Large Face List contains a set of persistedFaceIds which + are persisted and will never expire. Required. + :paramtype large_face_list_id: str + :keyword max_num_of_candidates_returned: The number of top similar faces returned. The valid + range is [1, 1000]. Default value is 20. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword mode: Similar face searching mode. It can be 'matchPerson' or 'matchFace'. Default + value is 'matchPerson'. Known values are: "matchPerson" and "matchFace". Default value is None. 
+ :paramtype mode: str or ~azure.ai.vision.face.models.FindSimilarMatchMode + :return: list of FaceFindSimilarResult + :rtype: list[~azure.ai.vision.face.models.FaceFindSimilarResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # faceId of the query face. User needs to call "Detect" + first to get a valid faceId. Note that this faceId is not persisted and will + expire 24 hours after the detection call. Required. + "largeFaceListId": "str", # An existing user-specified unique candidate + Large Face List, created in "Create Large Face List". Large Face List contains a + set of persistedFaceIds which are persisted and will never expire. Required. + "maxNumOfCandidatesReturned": 0, # Optional. The number of top similar faces + returned. The valid range is [1, 1000]. Default value is 20. + "mode": "str" # Optional. Similar face searching mode. It can be + 'matchPerson' or 'matchFace'. Default value is 'matchPerson'. Known values are: + "matchPerson" and "matchFace". + } + + # response body for status code(s): 200 + response == [ + { + "confidence": 0.0, # Confidence value of the candidate. The higher + confidence, the more similar. Range between [0,1]. Required. + "faceId": "str", # Optional. faceId of candidate face when find by + faceIds. faceId is created by "Detect" and will expire 24 hours after the + detection call. + "persistedFaceId": "str" # Optional. persistedFaceId of candidate + face when find by faceListId or largeFaceListId. persistedFaceId in face + list/large face list is persisted and will not expire. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceFindSimilarResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if large_face_list_id is _Unset: + raise TypeError("missing required argument: large_face_list_id") + body = { + "faceid": face_id, + "largefacelistid": large_face_list_id, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + "mode": mode, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_find_similar_from_large_face_list_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await 
response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceFindSimilarResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def identify_from_person_group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Group (given by personGroupId), and return candidate + person(s) for that face ranked by similarity confidence. The Person Group should be trained to + make it ready for identification. See more in "Train Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group. 
+ * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "personGroupId": "str", # personGroupId of the target Person Group, created + by "Create Person Group". Parameter personGroupId and largePersonGroupId should + not be provided at the same time. Required. + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + + @overload + async def identify_from_person_group( + self, + *, + face_ids: List[str], + person_group_id: str, + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Group (given by personGroupId), and return candidate + person(s) for that face ranked by similarity confidence. The Person Group should be trained to + make it ready for identification. See more in "Train Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group. + + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. 
+ :paramtype face_ids: list[str] + :keyword person_group_id: personGroupId of the target Person Group, created by "Create Person + Group". Parameter personGroupId and largePersonGroupId should not be provided at the same time. + Required. + :paramtype person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @overload + async def identify_from_person_group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Person Group. 
+ + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Group (given by personGroupId), and return candidate + person(s) for that face ranked by similarity confidence. The Person Group should be trained to + make it ready for identification. See more in "Train Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + + @distributed_trace_async + async def identify_from_person_group( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_ids: List[str] = _Unset, + person_group_id: str = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Group (given by personGroupId), and return candidate + person(s) for that face ranked by similarity confidence. The Person Group should be trained to + make it ready for identification. See more in "Train Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. 
The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword person_group_id: personGroupId of the target Person Group, created by "Create Person + Group". Parameter personGroupId and largePersonGroupId should not be provided at the same time. + Required. + :paramtype person_group_id: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "personGroupId": "str", # personGroupId of the target Person Group, created + by "Create Person Group". Parameter personGroupId and largePersonGroupId should + not be provided at the same time. Required. + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. 
+ "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceIdentificationResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + if person_group_id is _Unset: + raise TypeError("missing required argument: person_group_id") + body = { + "confidencethreshold": confidence_threshold, + "faceids": face_ids, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + "persongroupid": person_group_id, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_identify_from_person_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + 
"apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceIdentificationResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def identify_from_large_person_group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Large Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Large Person Group (given by largePersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. The Large Person Group + should be trained to make it ready for identification. See more in "Train Large Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. 
+ * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group/Large Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group or Large Person Group. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "largePersonGroupId": "str", # largePersonGroupId of the target Large Person + Group, created by "Create Large Person Group". Parameter personGroupId and + largePersonGroupId should not be provided at the same time. Required. + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. 
The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @overload + async def identify_from_large_person_group( + self, + *, + face_ids: List[str], + large_person_group_id: str, + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Large Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Large Person Group (given by largePersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. The Large Person Group + should be trained to make it ready for identification. See more in "Train Large Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. 
+ * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group/Large Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group or Large Person Group. + + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword large_person_group_id: largePersonGroupId of the target Large Person Group, created by + "Create Large Person Group". Parameter personGroupId and largePersonGroupId should not be + provided at the same time. Required. + :paramtype large_person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. 
+ "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @overload + async def identify_from_large_person_group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Large Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Large Person Group (given by largePersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. The Large Person Group + should be trained to make it ready for identification. See more in "Train Large Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group/Large Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group or Large Person Group. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @distributed_trace_async + async def identify_from_large_person_group( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_ids: List[str] = _Unset, + large_person_group_id: str = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Large Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Large Person Group (given by largePersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. The Large Person Group + should be trained to make it ready for identification. See more in "Train Large Person Group". + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. 
+ * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * Try "Find Similar" when you need to find similar faces from a Face List/Large Face List + instead of a Person Group/Large Person Group. + * The 'recognitionModel' associated with the query faces' faceIds should be the same as the + 'recognitionModel' used by the target Person Group or Large Person Group. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword large_person_group_id: largePersonGroupId of the target Large Person Group, created by + "Create Large Person Group". Parameter personGroupId and largePersonGroupId should not be + provided at the same time. Required. + :paramtype large_person_group_id: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "largePersonGroupId": "str", # largePersonGroupId of the target Large Person + Group, created by "Create Large Person Group". Parameter personGroupId and + largePersonGroupId should not be provided at the same time. Required. + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceIdentificationResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + if large_person_group_id is _Unset: + raise TypeError("missing required argument: large_person_group_id") + body = { + "confidencethreshold": confidence_threshold, + "faceids": face_ids, + "largepersongroupid": large_person_group_id, + "maxnumofcandidatesreturned": max_num_of_candidates_returned, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_identify_from_large_person_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code 
not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceIdentificationResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def identify_from_person_directory( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + person directory personIds array. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Directory Persons (given by personIds), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "personIds": [ + "str" # Array of personIds created in Person Directory "Create + Person". The valid number of personIds is between [1,30]. Required. + ], + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + + @overload + async def identify_from_person_directory( + self, + *, + face_ids: List[str], + person_ids: List[str], + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + person directory personIds array. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Directory Persons (given by personIds), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword person_ids: Array of personIds created in Person Directory "Create Person". The valid + number of personIds is between [1,30]. Required. + :paramtype person_ids: list[str] + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @overload + async def identify_from_person_directory( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + person directory personIds array. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Directory Persons (given by personIds), and return + candidate person(s) for that face ranked by similarity confidence. + + .. 
+ + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + + @distributed_trace_async + async def identify_from_person_directory( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_ids: List[str] = _Unset, + person_ids: List[str] = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + person directory personIds array. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Person Directory Persons (given by personIds), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword person_ids: Array of personIds created in Person Directory "Create Person". The valid + number of personIds is between [1,30]. 
Required. + :paramtype person_ids: list[str] + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "personIds": [ + "str" # Array of personIds created in Person Directory "Create + Person". The valid number of personIds is between [1,30]. Required. + ], + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. 
+ "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceIdentificationResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + if person_ids is _Unset: + raise TypeError("missing required argument: person_ids") + body = { + "confidenceThreshold": confidence_threshold, + "faceIds": face_ids, + "maxNumOfCandidatesReturned": max_num_of_candidates_returned, + "personIds": person_ids, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_identify_from_person_directory_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, 
**kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceIdentificationResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def identify_from_dynamic_person_group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Dynamic Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Dynamic Person Group (given by dynamicPersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. 
+ * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "dynamicPersonGroupId": "str", # DynamicPersonGroupId of the target + PersonDirectory DynamicPersonGroup to match against. Required. + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + + @overload + async def identify_from_dynamic_person_group( + self, + *, + face_ids: List[str], + dynamic_person_group_id: str, + content_type: str = "application/json", + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Dynamic Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Dynamic Person Group (given by dynamicPersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword dynamic_person_group_id: DynamicPersonGroupId of the target PersonDirectory + DynamicPersonGroup to match against. Required. + :paramtype dynamic_person_group_id: str + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + + @overload + async def identify_from_dynamic_person_group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Dynamic Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Dynamic Person Group (given by dynamicPersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. + + .. 
+ + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. + "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. 
+ } + ] + """ + + @distributed_trace_async + async def identify_from_dynamic_person_group( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_ids: List[str] = _Unset, + dynamic_person_group_id: str = _Unset, + max_num_of_candidates_returned: Optional[int] = None, + confidence_threshold: Optional[float] = None, + **kwargs: Any + ) -> List[_models.FaceIdentificationResult]: + # pylint: disable=line-too-long + """1-to-many identification to find the closest matches of the specific query person face from a + Dynamic Person Group. + + For each face in the faceIds array, Face Identify will compute similarities between the query + face and all the faces in the Dynamic Person Group (given by dynamicPersonGroupId), and return + candidate person(s) for that face ranked by similarity confidence. + + .. + + [!NOTE] + + * + + + * The algorithm allows more than one face to be identified independently at the same + request, but no more than 10 faces. + * Each person could have more than one face, but no more than 248 faces. + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * Number of candidates returned is restricted by maxNumOfCandidatesReturned and + confidenceThreshold. If no person is identified, the returned candidates will be an empty + array. + * The Identify operation can only match faces obtained with the same recognition model, that + is associated with the query faces. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_ids: Array of query faces faceIds, created by the "Detect". Each of the faces are + identified independently. The valid number of faceIds is between [1, 10]. Required. + :paramtype face_ids: list[str] + :keyword dynamic_person_group_id: DynamicPersonGroupId of the target PersonDirectory + DynamicPersonGroup to match against. Required. 
+ :paramtype dynamic_person_group_id: str + :keyword max_num_of_candidates_returned: The range of maxNumOfCandidatesReturned is between 1 + and 100. Default value is 10. Default value is None. + :paramtype max_num_of_candidates_returned: int + :keyword confidence_threshold: Customized identification confidence threshold, in the range of + [0, 1]. Advanced user can tweak this value to override default internal threshold for better + precision on their scenario data. Note there is no guarantee of this threshold value working on + other data and after algorithm updates. Default value is None. + :paramtype confidence_threshold: float + :return: list of FaceIdentificationResult + :rtype: list[~azure.ai.vision.face.models.FaceIdentificationResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "dynamicPersonGroupId": "str", # DynamicPersonGroupId of the target + PersonDirectory DynamicPersonGroup to match against. Required. + "faceIds": [ + "str" # Array of query faces faceIds, created by the "Detect". Each + of the faces are identified independently. The valid number of faceIds is + between [1, 10]. Required. + ], + "confidenceThreshold": 0.0, # Optional. Customized identification confidence + threshold, in the range of [0, 1]. Advanced user can tweak this value to override + default internal threshold for better precision on their scenario data. Note + there is no guarantee of this threshold value working on other data and after + algorithm updates. + "maxNumOfCandidatesReturned": 0 # Optional. The range of + maxNumOfCandidatesReturned is between 1 and 100. Default value is 10. + } + + # response body for status code(s): 200 + response == [ + { + "candidates": [ + { + "confidence": 0.0, # Confidence value of the + candidate. The higher confidence, the more similar. Range between + [0,1]. Required. 
+ "personId": "str" # personId of candidate person. + Required. + } + ], + "faceId": "str" # faceId of the query face. Required. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[List[_models.FaceIdentificationResult]] = kwargs.pop("cls", None) + + if body is _Unset: + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + if dynamic_person_group_id is _Unset: + raise TypeError("missing required argument: dynamic_person_group_id") + body = { + "confidenceThreshold": confidence_threshold, + "dynamicPersonGroupId": dynamic_person_group_id, + "faceIds": face_ids, + "maxNumOfCandidatesReturned": max_num_of_candidates_returned, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_identify_from_dynamic_person_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: 
disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceIdentificationResult], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def verify_face_to_face( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether two faces belong to a same person. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the both faces should be the same. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId1": "str", # The faceId of one face, come from "Detect". Required. 
+ "faceId2": "str" # The faceId of another face, come from "Detect". Required. + } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + async def verify_face_to_face( + self, *, face_id1: str, face_id2: str, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether two faces belong to a same person. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the both faces should be the same. + + :keyword face_id1: The faceId of one face, come from "Detect". Required. + :paramtype face_id1: str + :keyword face_id2: The faceId of another face, come from "Detect". Required. + :paramtype face_id2: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + async def verify_face_to_face( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether two faces belong to a same person. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the both faces should be the same. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. 
+ By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @distributed_trace_async + async def verify_face_to_face( + self, body: Union[JSON, IO[bytes]] = _Unset, *, face_id1: str = _Unset, face_id2: str = _Unset, **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether two faces belong to a same person. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the both faces should be the same. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id1: The faceId of one face, come from "Detect". Required. + :paramtype face_id1: str + :keyword face_id2: The faceId of another face, come from "Detect". Required. + :paramtype face_id2: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId1": "str", # The faceId of one face, come from "Detect". Required. + "faceId2": "str" # The faceId of another face, come from "Detect". Required. 
+ } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FaceVerificationResult] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id1 is _Unset: + raise TypeError("missing required argument: face_id1") + if face_id2 is _Unset: + raise TypeError("missing required argument: face_id2") + body = {"faceid1": face_id1, "faceid2": face_id2} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_verify_face_to_face_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", 
self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceVerificationResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def verify_from_person_group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Person Group. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. 
The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "personGroupId": "str", # Using existing personGroupId and personId for fast + loading a specified person. personGroupId is created in "Create Person Group". + Required. + "personId": "str" # Specify a certain person in Person Group. Required. + } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + async def verify_from_person_group( + self, + *, + face_id: str, + person_group_id: str, + person_id: str, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Person Group. 
+ + :keyword face_id: The faceId of the face, come from "Detect". Required. + :paramtype face_id: str + :keyword person_group_id: Using existing personGroupId and personId for fast loading a + specified person. personGroupId is created in "Create Person Group". Required. + :paramtype person_group_id: str + :keyword person_id: Specify a certain person in Person Group. Required. + :paramtype person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + async def verify_from_person_group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. 
+ * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Person Group. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @distributed_trace_async + async def verify_from_person_group( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_id: str = _Unset, + person_group_id: str = _Unset, + person_id: str = _Unset, + **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Person Group. 
+ + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id: The faceId of the face, come from "Detect". Required. + :paramtype face_id: str + :keyword person_group_id: Using existing personGroupId and personId for fast loading a + specified person. personGroupId is created in "Create Person Group". Required. + :paramtype person_group_id: str + :keyword person_id: Specify a certain person in Person Group. Required. + :paramtype person_id: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "personGroupId": "str", # Using existing personGroupId and personId for fast + loading a specified person. personGroupId is created in "Create Person Group". + Required. + "personId": "str" # Specify a certain person in Person Group. Required. + } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FaceVerificationResult] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if person_group_id is _Unset: + raise TypeError("missing required argument: person_group_id") + if person_id is _Unset: + raise TypeError("missing required argument: person_id") + body = {"faceid": face_id, "persongroupid": person_group_id, "personid": person_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_verify_from_person_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load 
the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceVerificationResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def verify_from_large_person_group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Large Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Large Person Group. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "largePersonGroupId": "str", # Using existing largePersonGroupId and + personId for fast loading a specified person. 
largePersonGroupId is created in + "Create Large Person Group". Required. + "personId": "str" # Specify a certain person in Large Person Group. + Required. + } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + async def verify_from_large_person_group( + self, + *, + face_id: str, + large_person_group_id: str, + person_id: str, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Large Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Large Person Group. + + :keyword face_id: The faceId of the face, come from "Detect". Required. + :paramtype face_id: str + :keyword large_person_group_id: Using existing largePersonGroupId and personId for fast loading + a specified person. largePersonGroupId is created in "Create Large Person Group". Required. + :paramtype large_person_group_id: str + :keyword person_id: Specify a certain person in Large Person Group. Required. 
+ :paramtype person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + async def verify_from_large_person_group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Large Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Large Person Group. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @distributed_trace_async + async def verify_from_large_person_group( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + face_id: str = _Unset, + large_person_group_id: str = _Unset, + person_id: str = _Unset, + **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in a Large Person Group. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The 'recognitionModel' associated with the query face should be the same as the + 'recognitionModel' used by the Large Person Group. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id: The faceId of the face, come from "Detect". Required. 
+ :paramtype face_id: str + :keyword large_person_group_id: Using existing largePersonGroupId and personId for fast loading + a specified person. largePersonGroupId is created in "Create Large Person Group". Required. + :paramtype large_person_group_id: str + :keyword person_id: Specify a certain person in Large Person Group. Required. + :paramtype person_id: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "largePersonGroupId": "str", # Using existing largePersonGroupId and + personId for fast loading a specified person. largePersonGroupId is created in + "Create Large Person Group". Required. + "personId": "str" # Specify a certain person in Large Person Group. + Required. + } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FaceVerificationResult] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if large_person_group_id is _Unset: + raise TypeError("missing required argument: large_person_group_id") + if person_id is _Unset: + raise TypeError("missing required argument: person_id") + body = {"faceid": face_id, "largepersongroupid": large_person_group_id, "personid": person_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_verify_from_large_person_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + 
await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceVerificationResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def verify_from_person_directory( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in Person Directory. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The Verify operation can only match faces obtained with the same recognition model, that + is associated with the query face. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "personId": "str" # Specify a certain person in PersonDirectory Person. + Required. 
+ } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + async def verify_from_person_directory( + self, *, face_id: str, person_id: str, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in Person Directory. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The Verify operation can only match faces obtained with the same recognition model, that + is associated with the query face. + + :keyword face_id: The faceId of the face, come from "Detect". Required. + :paramtype face_id: str + :keyword person_id: Specify a certain person in PersonDirectory Person. Required. + :paramtype person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @overload + async def verify_from_person_directory( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in Person Directory. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The Verify operation can only match faces obtained with the same recognition model, that + is associated with the query face. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + + @distributed_trace_async + async def verify_from_person_directory( + self, body: Union[JSON, IO[bytes]] = _Unset, *, face_id: str = _Unset, person_id: str = _Unset, **kwargs: Any + ) -> _models.FaceVerificationResult: + # pylint: disable=line-too-long + """Verify whether a face belongs to a person in Person Directory. + + .. + + [!NOTE] + + * + + + * Higher face image quality means better identification precision. Please consider + high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) + or bigger. + * For the scenarios that are sensitive to accuracy please make your own judgment. + * The Verify operation can only match faces obtained with the same recognition model, that + is associated with the query face. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_id: The faceId of the face, come from "Detect". Required. + :paramtype face_id: str + :keyword person_id: Specify a certain person in PersonDirectory Person. Required. + :paramtype person_id: str + :return: FaceVerificationResult. The FaceVerificationResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceVerificationResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ body = { + "faceId": "str", # The faceId of the face, come from "Detect". Required. + "personId": "str" # Specify a certain person in PersonDirectory Person. + Required. + } + + # response body for status code(s): 200 + response == { + "confidence": 0.0, # A number indicates the similarity confidence of whether + two faces belong to the same person, or whether the face belongs to the person. + By default, isIdentical is set to True if similarity confidence is greater than + or equal to 0.5. This is useful for advanced users to override 'isIdentical' and + fine-tune the result on their own data. Required. + "isIdentical": bool # True if the two faces belong to the same person or the + face belongs to the person, otherwise false. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FaceVerificationResult] = kwargs.pop("cls", None) + + if body is _Unset: + if face_id is _Unset: + raise TypeError("missing required argument: face_id") + if person_id is _Unset: + raise TypeError("missing required argument: person_id") + body = {"faceid": face_id, "personid": person_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_verify_from_person_directory_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + 
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceVerificationResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def group( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceGroupingResult: + # pylint: disable=line-too-long + """Divide candidate faces into groups based on face similarity. + + > + * + + + * The output is one or more disjointed face groups and a messyGroup. A face group contains + faces that have similar looking, often of the same person. Face groups are ranked by group + size, i.e. number of faces. Notice that faces belonging to a same person might be split into + several groups in the result. + * MessyGroup is a special face group containing faces that cannot find any similar counterpart + face from original faces. The messyGroup will not appear in the result if all faces found their + counterparts. + * Group API needs at least 2 candidate faces and 1000 at most. 
We suggest to try "Verify Face + To Face" when you only have 2 candidate faces. + * The 'recognitionModel' associated with the query faces' faceIds should be the same. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceGroupingResult. The FaceGroupingResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceGroupingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of candidate faceIds created by "Detect". The maximum + is 1000 faces. Required. + ] + } + + # response body for status code(s): 200 + response == { + "groups": [ + [ + "str" # A partition of the original faces based on face + similarity. Groups are ranked by number of faces. Required. + ] + ], + "messyGroup": [ + "str" # Face ids array of faces that cannot find any similar faces + from original faces. Required. + ] + } + """ + + @overload + async def group( + self, *, face_ids: List[str], content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceGroupingResult: + # pylint: disable=line-too-long + """Divide candidate faces into groups based on face similarity. + + > + * + + + * The output is one or more disjointed face groups and a messyGroup. A face group contains + faces that have similar looking, often of the same person. Face groups are ranked by group + size, i.e. number of faces. Notice that faces belonging to a same person might be split into + several groups in the result. + * MessyGroup is a special face group containing faces that cannot find any similar counterpart + face from original faces. The messyGroup will not appear in the result if all faces found their + counterparts. 
+ * Group API needs at least 2 candidate faces and 1000 at most. We suggest to try "Verify Face + To Face" when you only have 2 candidate faces. + * The 'recognitionModel' associated with the query faces' faceIds should be the same. + + :keyword face_ids: Array of candidate faceIds created by "Detect". The maximum is 1000 faces. + Required. + :paramtype face_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceGroupingResult. The FaceGroupingResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceGroupingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "groups": [ + [ + "str" # A partition of the original faces based on face + similarity. Groups are ranked by number of faces. Required. + ] + ], + "messyGroup": [ + "str" # Face ids array of faces that cannot find any similar faces + from original faces. Required. + ] + } + """ + + @overload + async def group( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.FaceGroupingResult: + # pylint: disable=line-too-long + """Divide candidate faces into groups based on face similarity. + + > + * + + + * The output is one or more disjointed face groups and a messyGroup. A face group contains + faces that have similar looking, often of the same person. Face groups are ranked by group + size, i.e. number of faces. Notice that faces belonging to a same person might be split into + several groups in the result. + * MessyGroup is a special face group containing faces that cannot find any similar counterpart + face from original faces. The messyGroup will not appear in the result if all faces found their + counterparts. + * Group API needs at least 2 candidate faces and 1000 at most. 
We suggest to try "Verify Face + To Face" when you only have 2 candidate faces. + * The 'recognitionModel' associated with the query faces' faceIds should be the same. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: FaceGroupingResult. The FaceGroupingResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceGroupingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "groups": [ + [ + "str" # A partition of the original faces based on face + similarity. Groups are ranked by number of faces. Required. + ] + ], + "messyGroup": [ + "str" # Face ids array of faces that cannot find any similar faces + from original faces. Required. + ] + } + """ + + @distributed_trace_async + async def group( + self, body: Union[JSON, IO[bytes]] = _Unset, *, face_ids: List[str] = _Unset, **kwargs: Any + ) -> _models.FaceGroupingResult: + # pylint: disable=line-too-long + """Divide candidate faces into groups based on face similarity. + + > + * + + + * The output is one or more disjointed face groups and a messyGroup. A face group contains + faces that have similar looking, often of the same person. Face groups are ranked by group + size, i.e. number of faces. Notice that faces belonging to a same person might be split into + several groups in the result. + * MessyGroup is a special face group containing faces that cannot find any similar counterpart + face from original faces. The messyGroup will not appear in the result if all faces found their + counterparts. + * Group API needs at least 2 candidate faces and 1000 at most. We suggest to try "Verify Face + To Face" when you only have 2 candidate faces. 
+ * The 'recognitionModel' associated with the query faces' faceIds should be the same. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword face_ids: Array of candidate faceIds created by "Detect". The maximum is 1000 faces. + Required. + :paramtype face_ids: list[str] + :return: FaceGroupingResult. The FaceGroupingResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceGroupingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "faceIds": [ + "str" # Array of candidate faceIds created by "Detect". The maximum + is 1000 faces. Required. + ] + } + + # response body for status code(s): 200 + response == { + "groups": [ + [ + "str" # A partition of the original faces based on face + similarity. Groups are ranked by number of faces. Required. + ] + ], + "messyGroup": [ + "str" # Face ids array of faces that cannot find any similar faces + from original faces. Required. 
+ ] + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FaceGroupingResult] = kwargs.pop("cls", None) + + if body is _Unset: + if face_ids is _Unset: + raise TypeError("missing required argument: face_ids") + body = {"faceids": face_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_group_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + 
+ if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceGroupingResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class FaceAdministrationClientOperationsMixin( # pylint: disable=too-many-public-methods + FaceAdministrationClientMixinABC +): + + @overload + async def create_face_list( # pylint: disable=inconsistent-return-statements + self, face_list_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create an empty Face List with user-specified faceListId, name, an optional userData and + recognitionModel. + + Up to 64 Face Lists are allowed in one subscription. + + Face List is a list of faces, up to 1,000 faces, and used by "Find Similar From Face List". + + After creation, user should use "Add Face List Face" to import the faces. No image will be + stored. Only the extracted face feature(s) will be stored on server until "Delete Face List" is + called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + Please consider Large Face List when the face number is large. It can support up to 1,000,000 + faces. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def create_face_list( # pylint: disable=inconsistent-return-statements + self, + face_list_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any + ) -> None: + """Create an empty Face List with user-specified faceListId, name, an optional userData and + recognitionModel. + + Up to 64 Face Lists are allowed in one subscription. + + Face List is a list of faces, up to 1,000 faces, and used by "Find Similar From Face List". + + After creation, user should use "Add Face List Face" to import the faces. No image will be + stored. Only the extracted face feature(s) will be stored on server until "Delete Face List" is + called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + Please consider Large Face List when the face number is large. It can support up to 1,000,000 + faces. 
+ + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_face_list( # pylint: disable=inconsistent-return-statements + self, face_list_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Create an empty Face List with user-specified faceListId, name, an optional userData and + recognitionModel. + + Up to 64 Face Lists are allowed in one subscription. + + Face List is a list of faces, up to 1,000 faces, and used by "Find Similar From Face List". + + After creation, user should use "Add Face List Face" to import the faces. No image will be + stored. Only the extracted face feature(s) will be stored on server until "Delete Face List" is + called. 
+ + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + Please consider Large Face List when the face number is large. It can support up to 1,000,000 + faces. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_face_list( # pylint: disable=inconsistent-return-statements + self, + face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create an empty Face List with user-specified faceListId, name, an optional userData and + recognitionModel. + + Up to 64 Face Lists are allowed in one subscription. + + Face List is a list of faces, up to 1,000 faces, and used by "Find Similar From Face List". + + After creation, user should use "Add Face List Face" to import the faces. No image will be + stored. Only the extracted face feature(s) will be stored on server until "Delete Face List" is + called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + Please consider Large Face List when the face number is large. 
It can support up to 1,000,000 + faces. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. 
Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "recognitionmodel": recognition_model, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_face_list_request( + face_list_id=face_list_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in 
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})  # type: ignore

    @distributed_trace_async
    async def delete_face_list(  # pylint: disable=inconsistent-return-statements
        self, face_list_id: str, **kwargs: Any
    ) -> None:
        """Delete a specified Face List.

        :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum
        length is 64. Required.
        :type face_list_id: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known status codes onto azure-core exception types; callers may extend
        # the mapping through the "error_map" kwarg.
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[None] = kwargs.pop("cls", None)

        _request = build_face_administration_delete_face_list_request(
            face_list_id=face_list_id,
            headers=_headers,
            params=_params,
        )
        # Substitute client-level endpoint/api-version into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # Only 200 is a success for this operation; anything else becomes an exception.
        if response.status_code not in [200]:
            if _stream:
                await response.read()  # Load the body in memory and close the socket
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})  # type: ignore

    @distributed_trace_async
    async def get_face_list(
        self, face_list_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any
    ) -> _models.FaceList:
        # pylint: disable=line-too-long
        """Retrieve a Face List's faceListId, name, userData, recognitionModel and faces in the Face List.

        :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum
        length is 64. Required.
        :type face_list_id: str
        :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is
        false. Default value is None.
        :paramtype return_recognition_model: bool
        :return: FaceList. The FaceList is compatible with MutableMapping
        :rtype: ~azure.ai.vision.face.models.FaceList
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "faceListId": "str",  # Valid character is letter in lower case or digit or
                      '-' or '_', maximum length is 64. Required.
                    "name": "str",  # User defined name, maximum length is 128. Required.
                    "persistedFaces": [
                        {
                            "persistedFaceId": "str",  # Face ID of the face. Required.
                            "userData": "str"  # Optional. User-provided data attached to
                              the face. The length limit is 1K.
                        }
                    ],
                    "recognitionModel": "str",  # Optional. Name of recognition model.
                      Recognition model is used when the face features are extracted and associated
                      with detected faceIds. Known values are: "recognition_01", "recognition_02",
                      "recognition_03", and "recognition_04".
                    "userData": "str"  # Optional. Optional user defined data. Length should not
                      exceed 16K.
                }
        """
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.FaceList] = kwargs.pop("cls", None)

        _request = build_face_administration_get_face_list_request(
            face_list_id=face_list_id,
            return_recognition_model=return_recognition_model,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        # Callers may request a raw streamed body via stream=True; the payload is then
        # returned as an iterator of bytes instead of a deserialized model.
        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                await response.read()  # Load the body in memory and close the socket
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.FaceList, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    async def update_face_list(  # pylint: disable=inconsistent-return-statements
        self, face_list_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> None:
        """Update information of a Face List, including
name and userData. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def update_face_list( # pylint: disable=inconsistent-return-statements + self, + face_list_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update information of a Face List, including name and userData. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_face_list( # pylint: disable=inconsistent-return-statements + self, face_list_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update information of a Face List, including name and userData. 
+ + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_face_list( # pylint: disable=inconsistent-return-statements + self, + face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update information of a Face List, including name and userData. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_face_list_request( + face_list_id=face_list_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: 
ignore + + @distributed_trace_async + async def get_face_lists( + self, *, return_recognition_model: Optional[bool] = None, **kwargs: Any + ) -> List[_models.FaceListItem]: + # pylint: disable=line-too-long + """List Face Lists' faceListId, name, userData and recognitionModel. + + To get face information inside Face List use "Get Face List". + + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: list of FaceListItem + :rtype: list[~azure.ai.vision.face.models.FaceListItem] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "faceListId": "str", # Valid character is letter in lower case or + digit or '-' or '_', maximum length is 64. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.FaceListItem]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_face_lists_request( + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.FaceListItem], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def add_face_list_face_from_url( + self, + face_list_id: str, + body: JSON, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, 
+ content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Face List, up to 1,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Face List + Face" or "Delete Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Required. 
+ :type body: JSON + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @overload + async def add_face_list_face_from_url( + self, + face_list_id: str, + *, + url: str, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Face List, up to 1,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Face List + Face" or "Delete Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @overload + async def add_face_list_face_from_url( + self, + face_list_id: str, + body: IO[bytes], + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Face List, up to 1,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Face List + Face" or "Delete Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @distributed_trace_async + async def add_face_list_face_from_url( + self, + face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Face List, up to 1,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Face List + Face" or "Delete Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_add_face_list_face_from_url_request( + face_list_id=face_list_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + 
error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def add_face_list_face( + self, + face_list_id: str, + image_content: bytes, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Face List, up to 1,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Face List + Face" or "Delete Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. 
+ * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param image_content: The image to be analyzed. Required. + :type image_content: bytes + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_administration_add_face_list_face_request( + face_list_id=face_list_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: 
ignore + + @distributed_trace_async + async def delete_face_list_face( # pylint: disable=inconsistent-return-statements + self, face_list_id: str, persisted_face_id: str, **kwargs: Any + ) -> None: + """Delete a face from a Face List by specified faceListId and persistedFaceId. + + Adding/deleting faces to/from a same Face List are processed sequentially and to/from different + Face Lists are in parallel. + + :param face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :type face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_face_list_face_request( + face_list_id=face_list_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + async def create_large_face_list( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create an empty Large Face List with user-specified largeFaceListId, name, an optional userData + and recognitionModel. + + Large Face List is a list of faces, up to 1,000,000 faces, and used by "Find Similar From Large + Face List". + + After creation, user should use Add Large Face List Face to import the faces and Train Large + Face List to make it ready for "Find Similar". No image will be stored. Only the extracted face + feature(s) will be stored on server until Delete Large Face List is called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 64 Large Face Lists. + * S0-tier subscription quota: 1,000,000 Large Face Lists. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def create_large_face_list( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any + ) -> None: + """Create an empty Large Face List with user-specified largeFaceListId, name, an optional userData + and recognitionModel. + + Large Face List is a list of faces, up to 1,000,000 faces, and used by "Find Similar From Large + Face List". + + After creation, user should use Add Large Face List Face to import the faces and Train Large + Face List to make it ready for "Find Similar". No image will be stored. Only the extracted face + feature(s) will be stored on server until Delete Large Face List is called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 64 Large Face Lists. + * S0-tier subscription quota: 1,000,000 Large Face Lists. 
+ + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_large_face_list( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Create an empty Large Face List with user-specified largeFaceListId, name, an optional userData + and recognitionModel. + + Large Face List is a list of faces, up to 1,000,000 faces, and used by "Find Similar From Large + Face List". + + After creation, user should use Add Large Face List Face to import the faces and Train Large + Face List to make it ready for "Find Similar". No image will be stored. 
Only the extracted face + feature(s) will be stored on server until Delete Large Face List is called. + + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 64 Large Face Lists. + * S0-tier subscription quota: 1,000,000 Large Face Lists. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_large_face_list( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create an empty Large Face List with user-specified largeFaceListId, name, an optional userData + and recognitionModel. + + Large Face List is a list of faces, up to 1,000,000 faces, and used by "Find Similar From Large + Face List". + + After creation, user should use Add Large Face List Face to import the faces and Train Large + Face List to make it ready for "Find Similar". No image will be stored. Only the extracted face + feature(s) will be stored on server until Delete Large Face List is called. 
+ + "Find Similar" is used for scenario like finding celebrity-like faces, similar face filtering, + or as a light way face identification. But if the actual use is to identify person, please use + Person Group / Large Person Group and "Identify". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 64 Large Face Lists. + * S0-tier subscription quota: 1,000,000 Large Face Lists. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. 
Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "recognitionmodel": recognition_model, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_large_face_list_request( + large_face_list_id=large_face_list_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = 
self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def delete_large_face_list( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, **kwargs: Any + ) -> None: + """Delete a face from a Large Face List by specified largeFaceListId and persistedFaceId. + + Adding/deleting faces to/from a same Large Face List are processed sequentially and to/from + different Large Face Lists are in parallel. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. 
+ :type large_face_list_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_large_face_list_request( + large_face_list_id=large_face_list_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_large_face_list( + self, large_face_list_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any + ) -> _models.LargeFaceList: + # pylint: disable=line-too-long + """Retrieve a Large Face List's largeFaceListId, name, userData and recognitionModel. 
+ + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: LargeFaceList. The LargeFaceList is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.LargeFaceList + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "largeFaceListId": "str", # Valid character is letter in lower case or digit + or '-' or '_', maximum length is 64. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LargeFaceList] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_face_list_request( + large_face_list_id=large_face_list_id, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LargeFaceList, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_large_face_list( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + 
"""Update information of a Large Face List, including name and userData. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def update_large_face_list( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update information of a Large Face List, including name and userData. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_large_face_list( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update information of a Large Face List, including name and userData. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_large_face_list( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update information of a Large Face List, including name and userData. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. 
User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_large_face_list_request( + large_face_list_id=large_face_list_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_large_face_lists( + self, + *, + start: Optional[str] = None, + top: Optional[int] = None, + return_recognition_model: Optional[bool] = None, + **kwargs: Any + ) -> List[_models.LargeFaceList]: + # pylint: disable=line-too-long + """List Large Face Lists' information of largeFaceListId, name, userData and recognitionModel. + + To get face information inside largeFaceList use "Get Large Face List Face". + + Large Face Lists are stored in alphabetical order of largeFaceListId. + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. 
+ :paramtype return_recognition_model: bool + :return: list of LargeFaceList + :rtype: list[~azure.ai.vision.face.models.LargeFaceList] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "largeFaceListId": "str", # Valid character is letter in lower case + or digit or '-' or '_', maximum length is 64. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LargeFaceList]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_face_lists_request( + start=start, + top=top, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response 
= pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LargeFaceList], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_large_face_list_training_status( + self, large_face_list_id: str, **kwargs: Any + ) -> _models.FaceCollectionTrainingResult: + # pylint: disable=line-too-long + """To check the Large Face List training status completed or still ongoing. Large Face List + training is an asynchronous operation triggered by "Train Large Face List". + + Training time depends on the number of face entries in a Large Face List. It could be in + seconds, or up to half an hour for 1,000,000 faces. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :return: FaceCollectionTrainingResult. The FaceCollectionTrainingResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceCollectionTrainingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "createdDateTime": "2020-02-20 00:00:00", # A combined UTC date and time + string that describes the created time of the person group, large person group or + large face list. Required. 
+ "lastActionDateTime": "2020-02-20 00:00:00", # A combined UTC date and time + string that describes the last modify time of the person group, large person + group or large face list, could be null value when the group is not successfully + trained. Required. + "lastSuccessfulTrainingDateTime": "2020-02-20 00:00:00", # A combined UTC + date and time string that describes the last successful training time of the + person group, large person group or large face list. Required. + "status": "str", # Training status of the container. Required. Known values + are: "notStarted", "running", "succeeded", and "failed". + "message": "str" # Optional. Show failure message when training failed + (omitted when training succeed). + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FaceCollectionTrainingResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_face_list_training_status_request( + large_face_list_id=large_face_list_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory 
and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceCollectionTrainingResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _train_large_face_list_initial( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, **kwargs: Any + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_train_large_face_list_request( + large_face_list_id=large_face_list_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def begin_train_large_face_list(self, large_face_list_id: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Submit a Large Face List training task. + + Training is a crucial step that only a trained Large Face List can be used by "Find Similar + From Large Face List". + + The training task is an asynchronous task. Training time depends on the number of face entries + in a Large Face List. It could be in seconds, or up to half an hour for 1,000,000 faces. To + check training completion, please use "Get Large Face List Training Status". + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. 
+ :type large_face_list_id: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._train_large_face_list_initial( # type: ignore + large_face_list_id=large_face_list_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @overload + async def add_large_face_list_face_from_url( + self, + large_face_list_id: str, + body: JSON, + *, + target_face: Optional[List[int]] = None, + 
detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Large Face List, up to 1,000,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large Face + List Face" or "Delete Large Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 faces per Large Face List. 
+ * S0-tier subscription quota: 1,000,000 faces per Large Face List. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: JSON + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @overload + async def add_large_face_list_face_from_url( + self, + large_face_list_id: str, + *, + url: str, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Large Face List, up to 1,000,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large Face + List Face" or "Delete Large Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 faces per Large Face List. + * S0-tier subscription quota: 1,000,000 faces per Large Face List. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @overload + async def add_large_face_list_face_from_url( + self, + large_face_list_id: str, + body: IO[bytes], + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Large Face List, up to 1,000,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large Face + List Face" or "Delete Large Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 faces per Large Face List. + * S0-tier subscription quota: 1,000,000 faces per Large Face List. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Required. + :type body: IO[bytes] + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @distributed_trace_async + async def add_large_face_list_face_from_url( + self, + large_face_list_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Large Face List, up to 1,000,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large Face + List Face" or "Delete Large Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 faces per Large Face List. + * S0-tier subscription quota: 1,000,000 faces per Large Face List. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. 
Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_add_large_face_list_face_from_url_request( + large_face_list_id=large_face_list_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # 
Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def add_large_face_list_face( + self, + large_face_list_id: str, + image_content: bytes, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a specified Large Face List, up to 1,000,000 faces. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large Face + List Face" or "Delete Large Face List" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. 
+ * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 faces per Large Face List. + * S0-tier subscription quota: 1,000,000 faces per Large Face List. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param image_content: The image to be analyzed. Required. + :type image_content: bytes + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_administration_add_large_face_list_face_request( + large_face_list_id=large_face_list_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_large_face_list_face( # pylint: disable=inconsistent-return-statements + self, large_face_list_id: str, persisted_face_id: str, **kwargs: Any + ) -> None: + """Delete a face from a Large Face List by specified largeFaceListId and persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_large_face_list_face_request( + large_face_list_id=large_face_list_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # 
type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_large_face_list_face( + self, large_face_list_id: str, persisted_face_id: str, **kwargs: Any + ) -> _models.LargeFaceListFace: + """Retrieve persisted face in Large Face List by largeFaceListId and persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: LargeFaceListFace. The LargeFaceListFace is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.LargeFaceListFace + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str", # Face ID of the face. Required. + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LargeFaceListFace] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_face_list_face_request( + large_face_list_id=large_face_list_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LargeFaceListFace, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_large_face_list_face( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + persisted_face_id: str, + body: JSON, + *, + content_type: str = "application/json", 
+ **kwargs: Any + ) -> None: + """Update a specified face's userData field in a Large Face List by its persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + + @overload + async def update_large_face_list_face( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + persisted_face_id: str, + *, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update a specified face's userData field in a Large Face List by its persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_large_face_list_face( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + persisted_face_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Update a specified face's userData field in a Large Face List by its persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_large_face_list_face( # pylint: disable=inconsistent-return-statements + self, + large_face_list_id: str, + persisted_face_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update a specified face's userData field in a Large Face List by its persistedFaceId. + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_large_face_list_face_request( + large_face_list_id=large_face_list_id, + persisted_face_id=persisted_face_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_large_face_list_faces( + self, large_face_list_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.LargeFaceListFace]: + """List faces' persistedFaceId and userData in a specified Large Face List. + + Faces are stored in alphabetical order of persistedFaceId created in "Add Large Face List + Face". + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :param large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :type large_face_list_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. 
+ :paramtype top: int + :return: list of LargeFaceListFace + :rtype: list[~azure.ai.vision.face.models.LargeFaceListFace] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "persistedFaceId": "str", # Face ID of the face. Required. + "userData": "str" # Optional. User-provided data attached to the + face. The length limit is 1K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LargeFaceListFace]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_face_list_faces_request( + large_face_list_id=large_face_list_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = 
response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LargeFaceListFace], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_person_group( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create a new Person Group with specified personGroupId, name, user-provided userData and + recognitionModel. + + A Person Group is a container holding the uploaded person data, including face recognition + features. + + After creation, use "Create Person Group Person" to add persons into the group, and then call + "Train Person Group" to get this group ready for "Identify From Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Person Group Person" or "Delete Person Group" is called. + + 'recognitionModel' should be specified to associate with this Person Group. The default value + for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Person + Group will use the recognition model that's already associated with the collection. Existing + face feature(s) in a Person Group can't be updated to features extracted by another version of + recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Person Groups. Each holds up to 1,000 persons. + * S0-tier subscription quota: 1,000,000 Person Groups. Each holds up to 10,000 persons. + * to handle larger scale face identification problem, please consider using Large Person + Group. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def create_person_group( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any + ) -> None: + """Create a new Person Group with specified personGroupId, name, user-provided userData and + recognitionModel. + + A Person Group is a container holding the uploaded person data, including face recognition + features. + + After creation, use "Create Person Group Person" to add persons into the group, and then call + "Train Person Group" to get this group ready for "Identify From Person Group". + + No image will be stored. 
Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Person Group Person" or "Delete Person Group" is called. + + 'recognitionModel' should be specified to associate with this Person Group. The default value + for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Person + Group will use the recognition model that's already associated with the collection. Existing + face feature(s) in a Person Group can't be updated to features extracted by another version of + recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Person Groups. Each holds up to 1,000 persons. + * S0-tier subscription quota: 1,000,000 Person Groups. Each holds up to 10,000 persons. + * to handle larger scale face identification problem, please consider using Large Person + Group. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". 
Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_person_group( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Create a new Person Group with specified personGroupId, name, user-provided userData and + recognitionModel. + + A Person Group is a container holding the uploaded person data, including face recognition + features. + + After creation, use "Create Person Group Person" to add persons into the group, and then call + "Train Person Group" to get this group ready for "Identify From Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Person Group Person" or "Delete Person Group" is called. + + 'recognitionModel' should be specified to associate with this Person Group. The default value + for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Person + Group will use the recognition model that's already associated with the collection. Existing + face feature(s) in a Person Group can't be updated to features extracted by another version of + recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Person Groups. Each holds up to 1,000 persons. + * S0-tier subscription quota: 1,000,000 Person Groups. Each holds up to 10,000 persons. + * to handle larger scale face identification problem, please consider using Large Person + Group. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_person_group( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create a new Person Group with specified personGroupId, name, user-provided userData and + recognitionModel. + + A Person Group is a container holding the uploaded person data, including face recognition + features. + + After creation, use "Create Person Group Person" to add persons into the group, and then call + "Train Person Group" to get this group ready for "Identify From Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Person Group Person" or "Delete Person Group" is called. + + 'recognitionModel' should be specified to associate with this Person Group. The default value + for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Person + Group will use the recognition model that's already associated with the collection. Existing + face feature(s) in a Person Group can't be updated to features extracted by another version of + recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Person Groups. Each holds up to 1,000 persons. + * S0-tier subscription quota: 1,000,000 Person Groups. Each holds up to 10,000 persons. + * to handle larger scale face identification problem, please consider using Large Person + Group. 
+ + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. 
Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "recognitionmodel": recognition_model, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_person_group_request( + person_group_id=person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def delete_person_group( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, **kwargs: Any + ) -> None: + """Delete an existing Person Group with specified personGroupId. Persisted data in this Person + Group will be deleted. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_person_group_request( + person_group_id=person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, 
response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_person_group( + self, person_group_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any + ) -> _models.PersonGroup: + # pylint: disable=line-too-long + """Retrieve Person Group name, userData and recognitionModel. To get person information under this + personGroup, use "Get Person Group Persons". + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: PersonGroup. The PersonGroup is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.PersonGroup + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "name": "str", # User defined name, maximum length is 128. Required. + "personGroupId": "str", # ID of the container. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PersonGroup] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_group_request( + person_group_id=person_group_id, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PersonGroup, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_person_group( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update an existing 
Person Group's name and userData. The properties keep unchanged if they are + not in request body. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def update_person_group( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update an existing Person Group's name and userData. The properties keep unchanged if they are + not in request body. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_person_group( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update an existing Person Group's name and userData. The properties keep unchanged if they are + not in request body. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_person_group( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update an existing Person Group's name and userData. The properties keep unchanged if they are + not in request body. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. 
Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_person_group_request( + person_group_id=person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, 
model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_person_groups( + self, + *, + start: Optional[str] = None, + top: Optional[int] = None, + return_recognition_model: Optional[bool] = None, + **kwargs: Any + ) -> List[_models.PersonGroup]: + # pylint: disable=line-too-long + """List Person Groups' personGroupId, name, userData and recognitionModel. + + Person Groups are stored in alphabetical order of personGroupId. + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: list of PersonGroup + :rtype: list[~azure.ai.vision.face.models.PersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == [ + { + "name": "str", # User defined name, maximum length is 128. Required. + "personGroupId": "str", # ID of the container. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.PersonGroup]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_groups_request( + start=start, + top=top, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.PersonGroup], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_person_group_training_status( + self, person_group_id: str, **kwargs: Any + ) -> _models.FaceCollectionTrainingResult: + # pylint: disable=line-too-long + """To check Person Group training status completed or still ongoing. Person Group training is an + asynchronous operation triggered by "Train Person Group" API. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :return: FaceCollectionTrainingResult. The FaceCollectionTrainingResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.FaceCollectionTrainingResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "createdDateTime": "2020-02-20 00:00:00", # A combined UTC date and time + string that describes the created time of the person group, large person group or + large face list. Required. + "lastActionDateTime": "2020-02-20 00:00:00", # A combined UTC date and time + string that describes the last modify time of the person group, large person + group or large face list, could be null value when the group is not successfully + trained. Required. + "lastSuccessfulTrainingDateTime": "2020-02-20 00:00:00", # A combined UTC + date and time string that describes the last successful training time of the + person group, large person group or large face list. Required. + "status": "str", # Training status of the container. Required. Known values + are: "notStarted", "running", "succeeded", and "failed". + "message": "str" # Optional. 
Show failure message when training failed + (omitted when training succeed). + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FaceCollectionTrainingResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_group_training_status_request( + person_group_id=person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FaceCollectionTrainingResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _train_person_group_initial( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, **kwargs: Any + ) -> None: + error_map: 
MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_train_person_group_request( + person_group_id=person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def begin_train_person_group(self, person_group_id: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Submit a Person Group training task. Training is a crucial step that only a trained Person + Group can be used by "Identify From Person Group". + + The training task is an asynchronous task. 
Training time depends on the number of person + entries, and their faces in a Person Group. It could be several seconds to minutes. To check + training status, please use "Get Person Group Training Status". + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._train_person_group_initial( # type: ignore + person_group_id=person_group_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return 
AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @overload + async def create_person_group_person( + self, person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Person Group. To add face to this person, please call "Add + Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Person Groups. + + * S0-tier subscription quota: + + * 10,000 persons per Person Group. + * 1,000,000 Person Groups. + * 100,000,000 persons in all Person Groups. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @overload + async def create_person_group_person( + self, + person_group_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Person Group. To add face to this person, please call "Add + Person Group Person Face". + + .. 
+ + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Person Groups. + + * S0-tier subscription quota: + + * 10,000 persons per Person Group. + * 1,000,000 Person Groups. + * 100,000,000 persons in all Person Groups. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @overload + async def create_person_group_person( + self, person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Person Group. To add face to this person, please call "Add + Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Person Groups. + + * S0-tier subscription quota: + + * 10,000 persons per Person Group. + * 1,000,000 Person Groups. + * 100,000,000 persons in all Person Groups. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: CreatePersonResult. 
The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @distributed_trace_async + async def create_person_group_person( + self, + person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Person Group. To add face to this person, please call "Add + Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Person Groups. + + * S0-tier subscription quota: + + * 10,000 persons per Person Group. + * 1,000,000 Person Groups. + * 100,000,000 persons in all Person Groups. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.CreatePersonResult] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_person_group_person_request( + person_group_id=person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, 
response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.CreatePersonResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_person_group_person( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, person_id: str, **kwargs: Any + ) -> None: + """Delete an existing person from a Person Group. The persistedFaceId, userData, person name and + face feature(s) in the person entry will all be deleted. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_person_group_person_request( + person_group_id=person_group_id, + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + 
response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_person_group_person( + self, person_group_id: str, person_id: str, **kwargs: Any + ) -> _models.PersonGroupPerson: + """Retrieve a person's name and userData, and the persisted faceIds representing the registered + person face feature(s). + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :return: PersonGroupPerson. The PersonGroupPerson is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.PersonGroupPerson + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "name": "str", # User defined name, maximum length is 128. Required. + "personId": "str", # ID of the person. Required. + "persistedFaceIds": [ + "str" # Optional. Face ids of registered faces in the person. + ], + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PersonGroupPerson] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_group_person_request( + person_group_id=person_group_id, + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PersonGroupPerson, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_person_group_person( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, person_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update name 
or userData of a person. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def update_person_group_person( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_person_group_person( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_person_group_person( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. 
+ "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_person_group_person_request( + person_group_id=person_group_id, + person_id=person_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, 
response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_person_group_persons( + self, person_group_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.PersonGroupPerson]: + """List all persons' information in the specified Person Group, including personId, name, userData + and persistedFaceIds of registered person faces. + + Persons are stored in alphabetical order of personId created in "Create Person Group Person". + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of PersonGroupPerson + :rtype: list[~azure.ai.vision.face.models.PersonGroupPerson] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == [ + { + "name": "str", # User defined name, maximum length is 128. Required. + "personId": "str", # ID of the person. Required. + "persistedFaceIds": [ + "str" # Optional. Face ids of registered faces in the + person. + ], + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.PersonGroupPerson]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_group_persons_request( + person_group_id=person_group_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(List[_models.PersonGroupPerson], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def add_person_group_person_face_from_url( + self, + person_group_id: str, + person_id: str, + body: JSON, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Person + Group Person Face", "Delete Person Group Person" or "Delete Person Group" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. 
+ Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: JSON + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. 
Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @overload + async def add_person_group_person_face_from_url( + self, + person_group_id: str, + person_id: str, + *, + url: str, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Person + Group Person Face", "Delete Person Group Person" or "Delete Person Group" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. 
+ Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @overload + async def add_person_group_person_face_from_url( + self, + person_group_id: str, + person_id: str, + body: IO[bytes], + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Person + Group Person Face", "Delete Person Group Person" or "Delete Person Group" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: IO[bytes] + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @distributed_trace_async + async def add_person_group_person_face_from_url( + self, + person_group_id: str, + person_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Person + Group Person Face", "Delete Person Group Person" or "Delete Person Group" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_add_person_group_person_face_from_url_request( + person_group_id=person_group_id, + person_id=person_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def add_person_group_person_face( + self, + person_group_id: str, + person_id: str, + image_content: bytes, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Person + Group Person Face", "Delete Person Group Person" or "Delete Person Group" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. 
+ * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param image_content: The image to be analyzed. Required. + :type image_content: bytes + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. 
Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_administration_add_person_group_person_face_request( + person_group_id=person_group_id, + person_id=person_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any + ) -> None: + """Delete a face from a person in a Person Group by specified personGroupId, personId and + persistedFaceId. + + Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_person_group_person_face_request( + person_group_id=person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await 
self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_person_group_person_face( + self, person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any + ) -> _models.PersonGroupPersonFace: + """Retrieve person face information. The persisted person face is specified by its personGroupId, + personId and persistedFaceId. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: PersonGroupPersonFace. The PersonGroupPersonFace is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.PersonGroupPersonFace + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str", # Face ID of the face. Required. + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PersonGroupPersonFace] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_group_person_face_request( + person_group_id=person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PersonGroupPersonFace, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + persisted_face_id: str, + body: 
JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Update a person persisted face's userData field. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + + @overload + async def update_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + persisted_face_id: str, + *, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update a person persisted face's userData field. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + persisted_face_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Update a person persisted face's userData field. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + person_group_id: str, + person_id: str, + persisted_face_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update a person persisted face's userData field. + + :param person_group_id: ID of the container. Required. + :type person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_person_group_person_face_request( + person_group_id=person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + async def create_large_person_group( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create a new Large Person Group with user-specified largePersonGroupId, name, an optional + userData and recognitionModel. + + A Large Person Group is a container holding the uploaded person data, including the face + recognition features. It can hold up to 1,000,000 entities. + + After creation, use "Create Large Person Group Person" to add person into the group, and call + "Train Large Person Group" to get this group ready for "Identify From Large Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + 'recognitionModel' should be specified to associate with this Large Person Group. The default + value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Large + Person Group will use the recognition model that's already associated with the collection. + Existing face feature(s) in a Large Person Group can't be updated to features extracted by + another version of recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Large Person Groups. + * S0-tier subscription quota: 1,000,000 Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def create_large_person_group( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any + ) -> None: + """Create a new Large Person Group with user-specified largePersonGroupId, name, an optional + userData and recognitionModel. + + A Large Person Group is a container holding the uploaded person data, including the face + recognition features. It can hold up to 1,000,000 entities. + + After creation, use "Create Large Person Group Person" to add person into the group, and call + "Train Large Person Group" to get this group ready for "Identify From Large Person Group". + + No image will be stored. 
Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + 'recognitionModel' should be specified to associate with this Large Person Group. The default + value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Large + Person Group will use the recognition model that's already associated with the collection. + Existing face feature(s) in a Large Person Group can't be updated to features extracted by + another version of recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Large Person Groups. + * S0-tier subscription quota: 1,000,000 Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. 
+ :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_large_person_group( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Create a new Large Person Group with user-specified largePersonGroupId, name, an optional + userData and recognitionModel. + + A Large Person Group is a container holding the uploaded person data, including the face + recognition features. It can hold up to 1,000,000 entities. + + After creation, use "Create Large Person Group Person" to add person into the group, and call + "Train Large Person Group" to get this group ready for "Identify From Large Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + 'recognitionModel' should be specified to associate with this Large Person Group. The default + value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Large + Person Group will use the recognition model that's already associated with the collection. + Existing face feature(s) in a Large Person Group can't be updated to features extracted by + another version of recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Large Person Groups. + * S0-tier subscription quota: 1,000,000 Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_large_person_group( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, _models.FaceRecognitionModel]] = None, + **kwargs: Any + ) -> None: + # pylint: disable=line-too-long + """Create a new Large Person Group with user-specified largePersonGroupId, name, an optional + userData and recognitionModel. + + A Large Person Group is a container holding the uploaded person data, including the face + recognition features. It can hold up to 1,000,000 entities. + + After creation, use "Create Large Person Group Person" to add person into the group, and call + "Train Large Person Group" to get this group ready for "Identify From Large Person Group". + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + 'recognitionModel' should be specified to associate with this Large Person Group. The default + value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly + specify the model you need in this parameter. New faces that are added to an existing Large + Person Group will use the recognition model that's already associated with the collection. + Existing face feature(s) in a Large Person Group can't be updated to features extracted by + another version of recognition model. + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: 1,000 Large Person Groups. + * S0-tier subscription quota: 1,000,000 Large Person Groups. + + :param large_person_group_id: ID of the container. Required. 
+ :type large_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :keyword recognition_model: The 'recognitionModel' associated with this face list. Supported + 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and + 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since + its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall + accuracy is improved compared with 'recognition_01' and 'recognition_02'. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Default value is + None. + :paramtype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. The 'recognitionModel' associated + with this face list. Supported 'recognitionModel' values include + 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The + default value is 'recognition_01'. 'recognition_04' is recommended since its + accuracy is improved on faces wearing masks compared with 'recognition_03', and + its overall accuracy is improved compared with 'recognition_01' and + 'recognition_02'. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "recognitionmodel": recognition_model, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_large_person_group_request( + large_person_group_id=large_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def delete_large_person_group( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, **kwargs: Any + ) -> None: + """Delete an existing Large Person Group with specified personGroupId. Persisted data in this + Large Person Group will be deleted. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_large_person_group_request( + large_person_group_id=large_person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + 
error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_large_person_group( + self, large_person_group_id: str, *, return_recognition_model: Optional[bool] = None, **kwargs: Any + ) -> _models.LargePersonGroup: + # pylint: disable=line-too-long + """Retrieve the information of a Large Person Group, including its name, userData and + recognitionModel. This API returns Large Person Group information only, use "Get Large Person + Group Persons" instead to retrieve person information under the Large Person Group. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is + false. Default value is None. + :paramtype return_recognition_model: bool + :return: LargePersonGroup. The LargePersonGroup is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.LargePersonGroup + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "largePersonGroupId": "str", # ID of the container. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "recognitionModel": "str", # Optional. Name of recognition model. + Recognition model is used when the face features are extracted and associated + with detected faceIds. Known values are: "recognition_01", "recognition_02", + "recognition_03", and "recognition_04". + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LargePersonGroup] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_person_group_request( + large_person_group_id=large_person_group_id, + return_recognition_model=return_recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LargePersonGroup, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_large_person_group( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + 
) -> None: + """Update an existing Large Person Group's name and userData. The properties keep unchanged if + they are not in request body. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def update_large_person_group( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update an existing Large Person Group's name and userData. The properties keep unchanged if + they are not in request body. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_large_person_group( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update an existing Large Person Group's name and userData. The properties keep unchanged if + they are not in request body. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_large_person_group( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update an existing Large Person Group's name and userData. The properties keep unchanged if + they are not in request body. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. 
User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_large_person_group_request( + large_person_group_id=large_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
    @distributed_trace_async
    async def get_large_person_groups(
        self,
        *,
        start: Optional[str] = None,
        top: Optional[int] = None,
        return_recognition_model: Optional[bool] = None,
        **kwargs: Any
    ) -> List[_models.LargePersonGroup]:
        """List all existing Large Person Groups' largePersonGroupId, name, userData and
        recognitionModel.

        Large Person Groups are stored in alphabetical order of largePersonGroupId.
        Paging: "start" is an exclusive lower bound on the ID (string comparison, empty
        means from the first item); "top" caps the page at up to 1000 entries. To fetch
        the next page, pass the last returned ID as "start".

        :keyword start: List resources greater than the "start". It contains no more than 64
         characters. Default is empty. Default value is None.
        :paramtype start: str
        :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000.
         Default value is None.
        :paramtype top: int
        :keyword return_recognition_model: Return 'recognitionModel' or not. The default value is
         false. Default value is None.
        :paramtype return_recognition_model: bool
        :return: list of LargePersonGroup
        :rtype: list[~azure.ai.vision.face.models.LargePersonGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP failure codes onto azure-core exception types; callers may
        # extend/override this via the "error_map" kwarg.
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[List[_models.LargePersonGroup]] = kwargs.pop("cls", None)

        _request = build_face_administration_get_large_person_groups_request(
            start=start,
            top=top,
            return_recognition_model=return_recognition_model,
            headers=_headers,
            params=_params,
        )
        # Expand the client-level URL template ({endpoint}/{apiVersion}) for this request.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                await response.read()  # Load the body in memory and close the socket
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        if _stream:
            # Caller asked for the raw payload; hand back the byte iterator untouched.
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(List[_models.LargePersonGroup], response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def get_large_person_group_training_status(
        self, large_person_group_id: str, **kwargs: Any
    ) -> _models.FaceCollectionTrainingResult:
        """To check Large Person Group training status completed or still ongoing. Large Person
        Group training is an asynchronous operation triggered by "Train Large Person Group" API.

        Training time depends on the number of person entries, and their faces in a Large Person
        Group. It could be in seconds, or up to half an hour for 1,000,000 persons.

        The result's "status" is one of "notStarted", "running", "succeeded" or "failed";
        "message" carries the failure reason when training failed.

        :param large_person_group_id: ID of the container. Required.
        :type large_person_group_id: str
        :return: FaceCollectionTrainingResult. The FaceCollectionTrainingResult is compatible with
         MutableMapping
        :rtype: ~azure.ai.vision.face.models.FaceCollectionTrainingResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.FaceCollectionTrainingResult] = kwargs.pop("cls", None)

        _request = build_face_administration_get_large_person_group_training_status_request(
            large_person_group_id=large_person_group_id,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                await response.read()  # Load the body in memory and close the socket
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.FaceCollectionTrainingResult, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
    async def _train_large_person_group_initial(  # pylint: disable=inconsistent-return-statements
        self, large_person_group_id: str, **kwargs: Any
    ) -> None:
        # Initial (non-polling) leg of the train LRO: submit the request and expect
        # 202 Accepted. The "operation-Location" response header is surfaced so the
        # poller in begin_train_large_person_group can track the operation.
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[None] = kwargs.pop("cls", None)

        _request = build_face_administration_train_large_person_group_request(
            large_person_group_id=large_person_group_id,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [202]:
            if _stream:
                await response.read()  # Load the body in memory and close the socket
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location"))

        if cls:
            return cls(pipeline_response, None, response_headers)  # type: ignore

    @distributed_trace_async
    async def begin_train_large_person_group(self, large_person_group_id: str, **kwargs: Any) -> AsyncLROPoller[None]:
        """Submit a Large Person Group training task. Training is a crucial step that only a trained
        Large Person Group can be used by "Identify From Large Person Group".

        The training task is an asynchronous task. Training time depends on the number of person
        entries, and their faces in a Large Person Group. It could be in several seconds, or up to
        half a hour for 1,000,000 persons. To check training status, please use "Get Large Person
        Group Training Status".

        :param large_person_group_id: ID of the container. Required.
        :type large_person_group_id: str
        :return: An instance of AsyncLROPoller that returns None
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # No continuation token: kick off a fresh operation. cls=lambda keeps the
            # raw PipelineResponse so the poller can read the operation-Location header.
            raw_result = await self._train_large_person_group_initial(  # type: ignore
                large_person_group_id=large_person_group_id,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # The train operation has no body; only invoke the user's callback, if any.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }

        # polling=True -> default LRO polling; polling=False -> no polling; otherwise a
        # caller-supplied AsyncPollingMethod is used as-is.
        if polling is True:
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod,
                AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
"application/json", **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Large Person Group. To add face to this person, please call + "Add Large Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Large Person Groups. + + * S0-tier subscription quota: + + * 1,000,000 persons per Large Person Group. + * 1,000,000 Large Person Groups. + * 1,000,000,000 persons in all Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @overload + async def create_large_person_group_person( + self, + large_person_group_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Large Person Group. To add face to this person, please call + "Add Large Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Large Person Groups. + + * S0-tier subscription quota: + + * 1,000,000 persons per Large Person Group. 
+ * 1,000,000 Large Person Groups. + * 1,000,000,000 persons in all Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @overload + async def create_large_person_group_person( + self, large_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Large Person Group. To add face to this person, please call + "Add Large Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Large Person Groups. + + * S0-tier subscription quota: + + * 1,000,000 persons per Large Person Group. + * 1,000,000 Large Person Groups. + * 1,000,000,000 persons in all Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: CreatePersonResult. 
The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @distributed_trace_async + async def create_large_person_group_person( + self, + large_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.CreatePersonResult: + """Create a new person in a specified Large Person Group. To add face to this person, please call + "Add Large Person Group Person Face". + + .. + + [!NOTE] + + * + + + * Free-tier subscription quota: + + * 1,000 persons in all Large Person Groups. + + * S0-tier subscription quota: + + * 1,000,000 persons per Large Person Group. + * 1,000,000 Large Person Groups. + * 1,000,000,000 persons in all Large Person Groups. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: CreatePersonResult. The CreatePersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreatePersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + + # response body for status code(s): 200 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.CreatePersonResult] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_large_person_group_person_request( + large_person_group_id=large_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket 
    @distributed_trace_async
    async def delete_large_person_group_person(  # pylint: disable=inconsistent-return-statements
        self, large_person_group_id: str, person_id: str, **kwargs: Any
    ) -> None:
        """Delete an existing person from a Large Person Group. The persistedFaceId, userData, person
        name and face feature(s) in the person entry will all be deleted.

        :param large_person_group_id: ID of the container. Required.
        :type large_person_group_id: str
        :param person_id: ID of the person. Required.
        :type person_id: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[None] = kwargs.pop("cls", None)

        _request = build_face_administration_delete_large_person_group_person_request(
            large_person_group_id=large_person_group_id,
            person_id=person_id,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        # Delete returns no body, so never stream.
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                await response.read()  # Load the body in memory and close the socket
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})  # type: ignore

    @distributed_trace_async
    async def get_large_person_group_person(
        self, large_person_group_id: str, person_id: str, **kwargs: Any
    ) -> _models.LargePersonGroupPerson:
        """Retrieve a person's name and userData, and the persisted faceIds representing the
        registered person face feature(s).

        :param large_person_group_id: ID of the container. Required.
        :type large_person_group_id: str
        :param person_id: ID of the person. Required.
        :type person_id: str
        :return: LargePersonGroupPerson. The LargePersonGroupPerson is compatible with
         MutableMapping
        :rtype: ~azure.ai.vision.face.models.LargePersonGroupPerson
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.LargePersonGroupPerson] = kwargs.pop("cls", None)

        _request = build_face_administration_get_large_person_group_person_request(
            large_person_group_id=large_person_group_id,
            person_id=person_id,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                await response.read()  # Load the body in memory and close the socket
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.LargePersonGroupPerson, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LargePersonGroupPerson] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_person_group_person_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LargePersonGroupPerson, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_large_person_group_person( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + body: JSON, + *, + content_type: str = 
"application/json", + **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def update_large_person_group_person( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_large_person_group_person( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_large_person_group_person( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. 
User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_large_person_group_person_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, 
    @distributed_trace_async
    async def get_large_person_group_persons(
        self, large_person_group_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any
    ) -> List[_models.LargePersonGroupPerson]:
        """List all persons' information in the specified Large Person Group, including personId,
        name, userData and persistedFaceIds of registered person faces.

        Persons are stored in alphabetical order of personId created in "Create Large Person Group
        Person". Paging: "start" is an exclusive lower bound on the ID (string comparison, empty
        means from the first item); "top" caps the page at up to 1000 entries. To fetch the next
        page, pass the personId of the last returned entry as "start".

        :param large_person_group_id: ID of the container. Required.
        :type large_person_group_id: str
        :keyword start: List resources greater than the "start". It contains no more than 64
         characters. Default is empty. Default value is None.
        :paramtype start: str
        :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default
         value is None.
        :paramtype top: int
        :return: list of LargePersonGroupPerson
        :rtype: list[~azure.ai.vision.face.models.LargePersonGroupPerson]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[List[_models.LargePersonGroupPerson]] = kwargs.pop("cls", None)

        _request = build_face_administration_get_large_person_group_persons_request(
            large_person_group_id=large_person_group_id,
            start=start,
            top=top,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                await response.read()  # Load the body in memory and close the socket
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(List[_models.LargePersonGroupPerson], response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: JSON + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. 
The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @overload + async def add_large_person_group_person_face_from_url( # pylint: disable=name-too-long + self, + large_person_group_id: str, + person_id: str, + *, + url: str, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Large Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large + Person Group Person Face", "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. 
The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @overload + async def add_large_person_group_person_face_from_url( # pylint: disable=name-too-long + self, + large_person_group_id: str, + person_id: str, + body: IO[bytes], + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Large Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large + Person Group Person Face", "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. 
Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: IO[bytes] + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AddFaceResult. 
The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @distributed_trace_async + async def add_large_person_group_person_face_from_url( # pylint: disable=name-too-long + self, + large_person_group_id: str, + person_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Large Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large + Person Group Person Face", "Delete Large Person Group Person" or "Delete Large Person Group" is + called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. 
Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. 
The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_add_large_person_group_person_face_from_url_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", 
skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def add_large_person_group_person_face( + self, + large_person_group_id: str, + person_id: str, + image_content: bytes, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> _models.AddFaceResult: + # pylint: disable=line-too-long + """Add a face to a person into a Large Person Group for face identification or verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until "Delete Large + Person Group Person Face", "Delete Large Person Group Person" or "Delete Large Person Group" is + called. 
+ + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param image_content: The image to be analyzed. Required. + :type image_content: bytes + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". 
+ Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: AddFaceResult. The AddFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.AddFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_administration_add_large_person_group_person_face_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = 
kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AddFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_large_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, large_person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any + ) -> None: + """Delete a face from a person in a Large Person Group by specified largePersonGroupId, personId + and persistedFaceId. + + Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. 
+ :type persisted_face_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_large_person_group_person_face_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_large_person_group_person_face( + self, large_person_group_id: str, person_id: str, persisted_face_id: str, **kwargs: Any + ) -> _models.LargePersonGroupPersonFace: + """Retrieve person face information. 
The persisted person face is specified by its + largePersonGroupId, personId and persistedFaceId. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :return: LargePersonGroupPersonFace. The LargePersonGroupPersonFace is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.LargePersonGroupPersonFace + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceId": "str", # Face ID of the face. Required. + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LargePersonGroupPersonFace] = kwargs.pop("cls", None) + + _request = build_face_administration_get_large_person_group_person_face_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, 
stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LargePersonGroupPersonFace, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_large_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + persisted_face_id: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Update a person persisted face's userData field. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. 
+ } + """ + + @overload + async def update_large_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + persisted_face_id: str, + *, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update a person persisted face's userData field. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_large_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + persisted_face_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Update a person persisted face's userData field. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_large_person_group_person_face( # pylint: disable=inconsistent-return-statements + self, + large_person_group_id: str, + person_id: str, + persisted_face_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update a person persisted face's userData field. + + :param large_person_group_id: ID of the container. Required. + :type large_person_group_id: str + :param person_id: ID of the person. Required. + :type person_id: str + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_large_person_group_person_face_request( + large_person_group_id=large_person_group_id, + person_id=person_id, + persisted_face_id=persisted_face_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise 
HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + async def _create_person_initial( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + **kwargs: Any + ) -> JSON: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_person_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + await 
response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = _deserialize(JSON, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_create_person( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> AsyncLROPoller[_models.CreatePersonResult]: + """Creates a new person in a Person Directory. To add face to this person, please call Person + Directory "Add Person Face". + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns CreatePersonResult. The CreatePersonResult + is compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.CreatePersonResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 202 + response == { + "personId": "str" # Person ID of the person. Required. 
+ } + """ + + @overload + async def begin_create_person( + self, *, name: str, content_type: str = "application/json", user_data: Optional[str] = None, **kwargs: Any + ) -> AsyncLROPoller[_models.CreatePersonResult]: + """Creates a new person in a Person Directory. To add face to this person, please call Person + Directory "Add Person Face". + + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: An instance of AsyncLROPoller that returns CreatePersonResult. The CreatePersonResult + is compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.CreatePersonResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @overload + async def begin_create_person( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> AsyncLROPoller[_models.CreatePersonResult]: + """Creates a new person in a Person Directory. To add face to this person, please call Person + Directory "Add Person Face". + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns CreatePersonResult. The CreatePersonResult + is compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.CreatePersonResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 202 + response == { + "personId": "str" # Person ID of the person. Required. + } + """ + + @distributed_trace_async + async def begin_create_person( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + **kwargs: Any + ) -> AsyncLROPoller[_models.CreatePersonResult]: + """Creates a new person in a Person Directory. To add face to this person, please call Person + Directory "Add Person Face". + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: An instance of AsyncLROPoller that returns CreatePersonResult. The CreatePersonResult + is compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.CreatePersonResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 202 + response == { + "personId": "str" # Person ID of the person. Required. 
+ } + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.CreatePersonResult] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_person_initial( + body=body, + name=name, + user_data=user_data, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["operation-Location"] = self._deserialize( + "str", response.headers.get("operation-Location") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = _deserialize(_models.CreatePersonResult, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.CreatePersonResult].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + 
client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.CreatePersonResult]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _delete_person_initial( # pylint: disable=inconsistent-return-statements + self, person_id: str, **kwargs: Any + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_person_request( + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + 
@distributed_trace_async + async def begin_delete_person(self, person_id: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Delete an existing person from Person Directory. The persistedFaceId(s), userData, person name + and face feature(s) in the person entry will all be deleted. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_person_initial( # type: ignore + person_id=person_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + 
deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace_async + async def get_person(self, person_id: str, **kwargs: Any) -> _models.PersonDirectoryPerson: + """Retrieve a person's name and userData from Person Directory. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :return: PersonDirectoryPerson. The PersonDirectoryPerson is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.PersonDirectoryPerson + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "name": "str", # User defined name, maximum length is 128. Required. + "personId": "str", # Person ID of the person. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PersonDirectoryPerson] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_request( + person_id=person_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + 
_request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PersonDirectoryPerson, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_person( # pylint: disable=inconsistent-return-statements + self, person_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def update_person( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param person_id: Person ID of the person. Required. 
+ :type person_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_person( # pylint: disable=inconsistent-return-statements + self, person_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_person( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update name or userData of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_person_request( + person_id=person_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_persons( + self, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.PersonDirectoryPerson]: + """List all persons' information in Person Directory, including personId, name, and userData. + + Persons are stored in alphabetical order of personId created in Person Directory "Create + Person". + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of PersonDirectoryPerson + :rtype: list[~azure.ai.vision.face.models.PersonDirectoryPerson] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == [ + { + "name": "str", # User defined name, maximum length is 128. Required. + "personId": "str", # Person ID of the person. Required. + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.PersonDirectoryPerson]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_persons_request( + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.PersonDirectoryPerson], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + 
+ return deserialized # type: ignore + + @distributed_trace_async + async def get_dynamic_person_group_references( + self, person_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> _models.ListGroupReferenceResult: + """List all Dynamic Person Groups a person has been referenced by in Person Directory. + + Dynamic Person Groups are stored in alphabetical order of Dynamic Person Group ID created in + Person Directory "Create Dynamic Person Group". + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: ListGroupReferenceResult. The ListGroupReferenceResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.ListGroupReferenceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 200 + response == { + "dynamicPersonGroupIds": [ + "str" # Array of PersonDirectory DynamicPersonGroup ids. Required. + ] + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListGroupReferenceResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_dynamic_person_group_references_request( + person_id=person_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListGroupReferenceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _add_person_face_from_url_initial( 
+ self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> JSON: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + if body is _Unset: + if url is _Unset: + raise TypeError("missing required argument: url") + body = {"url": url} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_add_person_face_from_url_request( + person_id=person_id, + recognition_model=recognition_model, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: 
disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = _deserialize(JSON, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_add_person_face_from_url( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + body: JSON, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AddFaceResult]: + # pylint: disable=line-too-long + """Add a face to a person (see Person Directory "Create Person") for face identification or + verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until Person Directory + "Delete Person Face" or "Delete Person" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. 
Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + * + * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + * This is a long running operation. Use Response Header "Operation-Location" to determine when + the AddFace operation has successfully propagated for future requests to "Identify". For + further information about Operation-Locations see "Get Face Operation Status". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param body: Required. 
+ :type body: JSON + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns AddFaceResult. The AddFaceResult is + compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.AddFaceResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 202 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + + @overload + async def begin_add_person_face_from_url( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + *, + url: str, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AddFaceResult]: + # pylint: disable=line-too-long + """Add a face to a person (see Person Directory "Create Person") for face identification or + verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until Person Directory + "Delete Person Face" or "Delete Person" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. 
To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + * + * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + * This is a long running operation. Use Response Header "Operation-Location" to determine when + the AddFace operation has successfully propagated for future requests to "Identify". For + further information about Operation-Locations see "Get Face Operation Status". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns AddFaceResult. 
The AddFaceResult is + compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.AddFaceResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @overload + async def begin_add_person_face_from_url( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + body: IO[bytes], + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AddFaceResult]: + # pylint: disable=line-too-long + """Add a face to a person (see Person Directory "Create Person") for face identification or + verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until Person Directory + "Delete Person Face" or "Delete Person" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. 
If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + * + * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + * This is a long running operation. Use Response Header "Operation-Location" to determine when + the AddFace operation has successfully propagated for future requests to "Identify". For + further information about Operation-Locations see "Get Face Operation Status". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param body: Required. + :type body: IO[bytes] + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. 
Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns AddFaceResult. The AddFaceResult is + compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.AddFaceResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. + } + """ + + @distributed_trace_async + async def begin_add_person_face_from_url( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + body: Union[JSON, IO[bytes]] = _Unset, + *, + url: str = _Unset, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> AsyncLROPoller[_models.AddFaceResult]: + # pylint: disable=line-too-long + """Add a face to a person (see Person Directory "Create Person") for face identification or + verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. 
Only the extracted face feature(s) will be stored on server until Person Directory + "Delete Person Face" or "Delete Person" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + * + * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + * This is a long running operation. Use Response Header "Operation-Location" to determine when + the AddFace operation has successfully propagated for future requests to "Identify". For + further information about Operation-Locations see "Get Face Operation Status". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. 
Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword url: URL of input image. Required. + :paramtype url: str + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: An instance of AsyncLROPoller that returns AddFaceResult. The AddFaceResult is + compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.AddFaceResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "url": "str" # URL of input image. Required. + } + + # response body for status code(s): 202 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._add_person_face_from_url_initial( + person_id=person_id, + recognition_model=recognition_model, + body=body, + url=url, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["operation-Location"] = self._deserialize( + "str", response.headers.get("operation-Location") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = _deserialize(_models.AddFaceResult, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return 
AsyncLROPoller[_models.AddFaceResult].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.AddFaceResult]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _add_person_face_initial( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + image_content: bytes, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> JSON: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[JSON] = kwargs.pop("cls", None) + + _content = image_content + + _request = build_face_administration_add_person_face_request( + person_id=person_id, + recognition_model=recognition_model, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + 
_request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = _deserialize(JSON, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_add_person_face( + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + image_content: bytes, + *, + target_face: Optional[List[int]] = None, + detection_model: Optional[Union[str, _models.FaceDetectionModel]] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> AsyncLROPoller[_models.AddFaceResult]: + # pylint: disable=line-too-long + """Add a face to a person (see Person Directory "Create Person") for face identification or + verification. + + To deal with an image containing multiple faces, input face can be specified as an image with a + targetFace rectangle. It returns a persistedFaceId representing the added face. No image will + be stored. Only the extracted face feature(s) will be stored on server until Person Directory + "Delete Person Face" or "Delete Person" is called. + + Note that persistedFaceId is different from faceId generated by "Detect". + > + * + + + * Higher face image quality means better recognition precision. 
Please consider high-quality + faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size + is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an + error. If the provided "targetFace" rectangle is not returned from "Detect", there's no + guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions + will cause failures. + * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. + Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum + face size. + * Different 'detectionModel' values can be provided. To use and compare different detection + models, please refer to + https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/how-to/specify-detection-model + * + * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting + faces to/from different persons are processed in parallel. + * This is a long running operation. Use Response Header "Operation-Location" to determine when + the AddFace operation has successfully propagated for future requests to "Identify". For + further information about Operation-Locations see "Get Face Operation Status". + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param image_content: The image to be analyzed. Required. 
+ :type image_content: bytes + :keyword target_face: A face rectangle to specify the target face to be added to a person, in + the format of 'targetFace=left,top,width,height'. Default value is None. + :paramtype target_face: list[int] + :keyword detection_model: The 'detectionModel' associated with the detected faceIds. Supported + 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default + value is 'detection_01'. Known values are: "detection_01", "detection_02", and "detection_03". + Default value is None. + :paramtype detection_model: str or ~azure.ai.vision.face.models.FaceDetectionModel + :keyword user_data: User-provided data attached to the face. The size limit is 1K. Default + value is None. + :paramtype user_data: str + :return: An instance of AsyncLROPoller that returns AddFaceResult. The AddFaceResult is + compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.AddFaceResult] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "persistedFaceId": "str" # Persisted Face ID of the added face, which is + persisted and will not expire. Different from faceId which is created in "Detect" + and will expire in 24 hours after the detection call. Required. 
+ } + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.AddFaceResult] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._add_person_face_initial( + person_id=person_id, + recognition_model=recognition_model, + image_content=image_content, + target_face=target_face, + detection_model=detection_model, + user_data=user_data, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["operation-Location"] = self._deserialize( + "str", response.headers.get("operation-Location") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + + deserialized = _deserialize(_models.AddFaceResult, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return 
AsyncLROPoller[_models.AddFaceResult].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return AsyncLROPoller[_models.AddFaceResult](
+            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
+        )
+
+    async def _delete_person_face_initial(  # pylint: disable=inconsistent-return-statements
+        self,
+        person_id: str,
+        recognition_model: Union[str, _models.FaceRecognitionModel],
+        persisted_face_id: str,
+        **kwargs: Any
+    ) -> None:
+        """Send the initial DELETE request for a person face and surface its raw outcome.
+
+        Internal helper for ``begin_delete_person_face``; a 202 status is the only
+        accepted response, anything else is mapped and raised as ``HttpResponseError``.
+        """
+        # Map common error statuses to typed azure-core exceptions; callers may extend via kwargs.
+        error_map: MutableMapping[int, Type[HttpResponseError]] = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_face_administration_delete_person_face_request(
+            person_id=person_id,
+            recognition_model=recognition_model,
+            persisted_face_id=persisted_face_id,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            if _stream:
+                await response.read()  # Load the body in memory and close the socket
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = _deserialize(_models.FaceErrorResponse,
response.json())
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        # The 202 response carries the LRO status URL in the 'operation-Location' header.
+        response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def begin_delete_person_face(
+        self,
+        person_id: str,
+        recognition_model: Union[str, _models.FaceRecognitionModel],
+        persisted_face_id: str,
+        **kwargs: Any
+    ) -> AsyncLROPoller[None]:
+        """Delete a face from a person in Person Directory by specified personId and persistedFaceId.
+
+        Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting
+        faces to/from different persons are processed in parallel.
+
+        :param person_id: Person ID of the person. Required.
+        :type person_id: str
+        :param recognition_model: The 'recognitionModel' associated with faces. Known values are:
+         "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required.
+        :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel
+        :param persisted_face_id: Face ID of the face. Required.
+        :type persisted_face_id: str
+        :return: An instance of AsyncLROPoller that returns None
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            raw_result = await self._delete_person_face_initial(  # type: ignore
+                person_id=person_id,
+                recognition_model=recognition_model,
+                persisted_face_id=persisted_face_id,
+                # Hand back the raw pipeline response so the poller can read the LRO headers.
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+        kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            # Delete produces no body; only invoke a user-supplied callback, if any.
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
+        }
+
+        # polling=True -> header-driven LRO polling; False -> no polling; otherwise a custom AsyncPollingMethod.
+        if polling is True:
+            polling_method: AsyncPollingMethod = cast(
+                AsyncPollingMethod,
+                AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
+            )
+        elif polling is False:
+            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return AsyncLROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    @distributed_trace_async
+    async def get_person_face(
+        self,
+        person_id: str,
recognition_model: Union[str, _models.FaceRecognitionModel],
+        persisted_face_id: str,
+        **kwargs: Any
+    ) -> _models.PersonDirectoryFace:
+        """Retrieve person face information. The persisted person face is specified by its personId,
+        recognitionModel, and persistedFaceId.
+
+        :param person_id: Person ID of the person. Required.
+        :type person_id: str
+        :param recognition_model: The 'recognitionModel' associated with faces. Known values are:
+         "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required.
+        :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel
+        :param persisted_face_id: Face ID of the face. Required.
+        :type persisted_face_id: str
+        :return: PersonDirectoryFace. The PersonDirectoryFace is compatible with MutableMapping
+        :rtype: ~azure.ai.vision.face.models.PersonDirectoryFace
+        :raises ~azure.core.exceptions.HttpResponseError:
+
+        Example:
+            .. code-block:: python
+
+                # response body for status code(s): 200
+                response == {
+                    "persistedFaceId": "str",  # Face ID of the face. Required.
+                    "userData": "str"  # Optional. User-provided data attached to the face. The
+                      length limit is 1K.
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PersonDirectoryFace] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_face_request( + person_id=person_id, + recognition_model=recognition_model, + persisted_face_id=persisted_face_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PersonDirectoryFace, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_person_face( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: str, 
+ body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Update a persisted face's userData field of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "userData": "str" # Optional. User-provided data attached to the face. The + length limit is 1K. + } + """ + + @overload + async def update_person_face( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: str, + *, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update a persisted face's userData field of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default + value is None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_person_face( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Update a persisted face's userData field of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :param persisted_face_id: Face ID of the face. Required. + :type persisted_face_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_person_face( # pylint: disable=inconsistent-return-statements + self, + person_id: str, + recognition_model: Union[str, _models.FaceRecognitionModel], + persisted_face_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update a persisted face's userData field of a person. + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. 
Known values are:
+         "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required.
+        :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel
+        :param persisted_face_id: Face ID of the face. Required.
+        :type persisted_face_id: str
+        :param body: Is either a JSON type or a IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword user_data: User-provided data attached to the face. The length limit is 1K. Default
+         value is None.
+        :paramtype user_data: str
+        :return: None
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+
+        Example:
+            .. code-block:: python
+
+                # JSON input template you can fill out and use as your body input.
+                body = {
+                    "userData": "str"  # Optional. User-provided data attached to the face. The
+                      length limit is 1K.
+                }
+        """
+        # Map common error statuses to typed azure-core exceptions; callers may extend via kwargs.
+        error_map: MutableMapping[int, Type[HttpResponseError]] = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            # Build the JSON body from the keyword argument using the documented wire
+            # name "userData" (see the JSON template above); the lower-cased key
+            # "userdata" does not match the service schema. Unset values are dropped.
+            body = {"userData": user_data}
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_face_administration_update_person_face_request(
+            person_id=person_id,
+            recognition_model=recognition_model,
+            persisted_face_id=persisted_face_id,
+            content_type=content_type,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint":
self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_person_faces( + self, person_id: str, recognition_model: Union[str, _models.FaceRecognitionModel], **kwargs: Any + ) -> _models.ListFaceResult: + """Retrieve a person's persistedFaceIds representing the registered person face feature(s). + + :param person_id: Person ID of the person. Required. + :type person_id: str + :param recognition_model: The 'recognitionModel' associated with faces. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". Required. + :type recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :return: ListFaceResult. The ListFaceResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.ListFaceResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "persistedFaceIds": [ + "str" # Array of persisted face ids. Required. + ], + "personId": "str" # Id of person. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListFaceResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_person_faces_request( + person_id=person_id, + recognition_model=recognition_model, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListFaceResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _create_dynamic_person_group_with_person_initial( # pylint: disable=inconsistent-return-statements,name-too-long + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + add_person_ids: 
List[str] = _Unset,
+        user_data: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """Send the initial request to create a Dynamic Person Group with persons.
+
+        Internal helper for ``begin_create_dynamic_person_group_with_person``; a 202
+        status is the only accepted response, anything else raises ``HttpResponseError``.
+        """
+        # Map common error statuses to typed azure-core exceptions; callers may extend via kwargs.
+        error_map: MutableMapping[int, Type[HttpResponseError]] = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            if name is _Unset:
+                raise TypeError("missing required argument: name")
+            if add_person_ids is _Unset:
+                raise TypeError("missing required argument: add_person_ids")
+            # Build the JSON body from keyword arguments using the documented wire names
+            # "addPersonIds"/"name"/"userData"; the lower-cased keys "addpersonids" and
+            # "userdata" do not match the service schema. Unset values are dropped.
+            body = {"addPersonIds": add_person_ids, "name": name, "userData": user_data}
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_face_administration_create_dynamic_person_group_with_person_request(
+            dynamic_person_group_id=dynamic_person_group_id,
+            content_type=content_type,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            if _stream:
+                await response.read()
# Load the body in memory and close the socket
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = _deserialize(_models.FaceErrorResponse, response.json())
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        # The 202 response carries the LRO status URL in the 'operation-Location' header.
+        response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @overload
+    async def begin_create_dynamic_person_group_with_person(  # pylint: disable=name-too-long
+        self, dynamic_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> AsyncLROPoller[_models.DynamicPersonGroup]:
+        """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided
+        userData.
+
+        A Dynamic Person Group is a container that references Person Directory "Create Person". After
+        creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the
+        Dynamic Person Group.
+
+        Dynamic Person Group and user data will be stored on server until Person Directory "Delete
+        Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the
+        dynamicPersonGroupId parameter to identify against persons.
+
+        No image will be stored. Only the person's extracted face feature(s) and userData will be
+        stored on server until Person Directory "Delete Person" or "Delete Person Face" is called.
+
+        'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person
+        Groups are references to Person Directory "Create Person" and therefore work with most all
+        'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel'
+        used.
+
+        :param dynamic_person_group_id: ID of the dynamic person group. Required.
+        :type dynamic_person_group_id: str
+        :param body: Required.
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns DynamicPersonGroup. The DynamicPersonGroup + is compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.DynamicPersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "addPersonIds": [ + "str" # Array of personIds created by Person Directory "Create + Person" to be added. Required. + ], + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 202 + response == { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def begin_create_dynamic_person_group_with_person( # pylint: disable=name-too-long + self, + dynamic_person_group_id: str, + *, + name: str, + add_person_ids: List[str], + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any + ) -> AsyncLROPoller[_models.DynamicPersonGroup]: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. 
Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword add_person_ids: Array of personIds created by Person Directory "Create Person" to be + added. Required. + :paramtype add_person_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: An instance of AsyncLROPoller that returns DynamicPersonGroup. The DynamicPersonGroup + is compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.DynamicPersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 202 + response == { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + + @overload + async def begin_create_dynamic_person_group_with_person( # pylint: disable=name-too-long + self, dynamic_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> AsyncLROPoller[_models.DynamicPersonGroup]: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns DynamicPersonGroup. The DynamicPersonGroup + is compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.DynamicPersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 202 + response == { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @distributed_trace_async + async def begin_create_dynamic_person_group_with_person( # pylint: disable=name-too-long + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + add_person_ids: List[str] = _Unset, + user_data: Optional[str] = None, + **kwargs: Any + ) -> AsyncLROPoller[_models.DynamicPersonGroup]: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword add_person_ids: Array of personIds created by Person Directory "Create Person" to be + added. Required. + :paramtype add_person_ids: list[str] + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: An instance of AsyncLROPoller that returns DynamicPersonGroup. The DynamicPersonGroup + is compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.ai.vision.face.models.DynamicPersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "addPersonIds": [ + "str" # Array of personIds created by Person Directory "Create + Person" to be added. Required. + ], + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + + # response body for status code(s): 202 + response == { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DynamicPersonGroup] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_dynamic_person_group_with_person_initial( # type: ignore + dynamic_person_group_id=dynamic_person_group_id, + body=body, + name=name, + add_person_ids=add_person_ids, + user_data=user_data, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["operation-Location"] = self._deserialize( + "str", response.headers.get("operation-Location") + ) + + deserialized = _deserialize(_models.DynamicPersonGroup, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.DynamicPersonGroup].from_continuation_token( + polling_method=polling_method, + 
continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.DynamicPersonGroup]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + @overload + async def create_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, dynamic_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def create_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, + dynamic_person_group_id: str, + *, + name: str, + content_type: str = "application/json", + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, dynamic_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Creates a new Dynamic Person Group with specified dynamicPersonGroupId, name, and user-provided + userData. + + A Dynamic Person Group is a container that references Person Directory "Create Person". After + creation, use Person Directory "Update Dynamic Person Group" to add/remove persons to/from the + Dynamic Person Group. + + Dynamic Person Group and user data will be stored on server until Person Directory "Delete + Dynamic Person Group" is called. Use "Identify From Dynamic Person Group" with the + dynamicPersonGroupId parameter to identify against persons. + + No image will be stored. Only the person's extracted face feature(s) and userData will be + stored on server until Person Directory "Delete Person" or "Delete Person Face" is called. + + 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person + Groups are references to Person Directory "Create Person" and therefore work with most all + 'recognitionModels'. The faceId's provided during "Identify" determine the 'recognitionModel' + used. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Required. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_create_dynamic_person_group_request( + dynamic_person_group_id=dynamic_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: 
disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + async def _delete_dynamic_person_group_initial( # pylint: disable=inconsistent-return-statements + self, dynamic_person_group_id: str, **kwargs: Any + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_administration_delete_dynamic_person_group_request( + dynamic_person_group_id=dynamic_person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, 
    async def _delete_dynamic_person_group_initial(  # pylint: disable=inconsistent-return-statements
        self, dynamic_person_group_id: str, **kwargs: Any
    ) -> None:
        """Issue the initial DELETE request that starts the long-running delete operation.

        Returns no body; the LRO status URL arrives in the ``operation-Location``
        response header, which is captured below for ``cls`` callers and the poller.
        """
        # Map generic HTTP failure codes to the azure-core exception types callers catch.
        error_map: MutableMapping[int, Type[HttpResponseError]] = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[None] = kwargs.pop("cls", None)

        _request = build_face_administration_delete_dynamic_person_group_request(
            dynamic_person_group_id=dynamic_person_group_id,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint/api-version into the URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # The service acknowledges the delete with 202 Accepted; anything else is an error.
        if response.status_code not in [202]:
            if _stream:
                await response.read()  # Load the body in memory and close the socket
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _deserialize(_models.FaceErrorResponse, response.json())
            raise HttpResponseError(response=response, model=error)

        response_headers = {}
        # URL the poller uses to track the operation's progress.
        response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location"))

        if cls:
            return cls(pipeline_response, None, response_headers)  # type: ignore

    @distributed_trace_async
    async def begin_delete_dynamic_person_group(
        self, dynamic_person_group_id: str, **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes an existing Dynamic Person Group with specified dynamicPersonGroupId.

        Deleting this Dynamic Person Group only delete the references to persons data. To delete
        actual person see Person Directory "Delete Person".

        :param dynamic_person_group_id: ID of the dynamic person group. Required.
        :type dynamic_person_group_id: str
        :return: An instance of AsyncLROPoller that returns None
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[None] = kwargs.pop("cls", None)
        # polling may be True (default LRO poller), False (no polling), or a custom method.
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        # A continuation token lets the caller rehydrate a poller for an already-started operation.
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # No token: issue the initial DELETE request now.
            raw_result = await self._delete_dynamic_person_group_initial(  # type: ignore
                dynamic_person_group_id=dynamic_person_group_id,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete produces no body; only invoke the caller-supplied response hook, if any.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }

        if polling is True:
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod,
                AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace_async + async def get_dynamic_person_group(self, dynamic_person_group_id: str, **kwargs: Any) -> _models.DynamicPersonGroup: + """Retrieve the information of a Dynamic Person Group, including its name and userData. + + This API returns Dynamic Person Group information only, use Person Directory "Get Dynamic + Person Group Persons" instead to retrieve person information under the Dynamic Person Group. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :return: DynamicPersonGroup. The DynamicPersonGroup is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.DynamicPersonGroup + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.DynamicPersonGroup] = kwargs.pop("cls", None) + + _request = build_face_administration_get_dynamic_person_group_request( + dynamic_person_group_id=dynamic_person_group_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.DynamicPersonGroup, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _update_dynamic_person_group_with_person_changes_initial( # pylint: disable=inconsistent-return-statements,name-too-long + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + 
user_data: Optional[str] = None, + add_person_ids: Optional[List[str]] = None, + remove_person_ids: Optional[List[str]] = None, + **kwargs: Any + ) -> None: + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "addpersonids": add_person_ids, + "name": name, + "removepersonids": remove_person_ids, + "userdata": user_data, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_dynamic_person_group_with_person_changes_request( + dynamic_person_group_id=dynamic_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + if _stream: + await response.read() # Load the body in memory and close 
the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["operation-Location"] = self._deserialize("str", response.headers.get("operation-Location")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @overload + async def begin_update_dynamic_person_group_with_person_changes( # pylint: disable=name-too-long + self, dynamic_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> AsyncLROPoller[None]: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "addPersonIds": [ + "str" # Optional. Array of personIds created by Person Directory + "Create Person" to be added. + ], + "name": "str", # Optional. User defined name, maximum length is 128. + "removePersonIds": [ + "str" # Optional. Array of personIds created by Person Directory + "Create Person" to be removed. + ], + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. 
    @overload
    async def begin_update_dynamic_person_group_with_person_changes(  # pylint: disable=name-too-long
        self, dynamic_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Update the name or userData of an existing Dynamic Person Group, and manage its members by
        adding or removing persons.

        The properties keep unchanged if they are not in request body.

        :param dynamic_person_group_id: ID of the dynamic person group. Required.
        :type dynamic_person_group_id: str
        :param body: JSON body; may contain "name", "userData", "addPersonIds", "removePersonIds".
         Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns None
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_update_dynamic_person_group_with_person_changes(  # pylint: disable=name-too-long
        self,
        dynamic_person_group_id: str,
        *,
        content_type: str = "application/json",
        name: Optional[str] = None,
        user_data: Optional[str] = None,
        add_person_ids: Optional[List[str]] = None,
        remove_person_ids: Optional[List[str]] = None,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Update the name or userData of an existing Dynamic Person Group, and manage its members by
        adding or removing persons.

        The properties keep unchanged if they are not in request body.

        :param dynamic_person_group_id: ID of the dynamic person group. Required.
        :type dynamic_person_group_id: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword name: User defined name, maximum length is 128. Default value is None.
        :paramtype name: str
        :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value
         is None.
        :paramtype user_data: str
        :keyword add_person_ids: Array of personIds created by Person Directory "Create Person" to
         be added. Default value is None.
        :paramtype add_person_ids: list[str]
        :keyword remove_person_ids: Array of personIds created by Person Directory "Create Person"
         to be removed. Default value is None.
        :paramtype remove_person_ids: list[str]
        :return: An instance of AsyncLROPoller that returns None
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_update_dynamic_person_group_with_person_changes(  # pylint: disable=name-too-long
        self, dynamic_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Update the name or userData of an existing Dynamic Person Group, and manage its members by
        adding or removing persons.

        The properties keep unchanged if they are not in request body.

        :param dynamic_person_group_id: ID of the dynamic person group. Required.
        :type dynamic_person_group_id: str
        :param body: Raw request payload. Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns None
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def begin_update_dynamic_person_group_with_person_changes(  # pylint: disable=name-too-long
        self,
        dynamic_person_group_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        name: Optional[str] = None,
        user_data: Optional[str] = None,
        add_person_ids: Optional[List[str]] = None,
        remove_person_ids: Optional[List[str]] = None,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Update the name or userData of an existing Dynamic Person Group, and manage its members by
        adding or removing persons.

        The properties keep unchanged if they are not in request body.

        :param dynamic_person_group_id: ID of the dynamic person group. Required.
        :type dynamic_person_group_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword name: User defined name, maximum length is 128. Default value is None.
        :paramtype name: str
        :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value
         is None.
        :paramtype user_data: str
        :keyword add_person_ids: Array of personIds created by Person Directory "Create Person" to
         be added. Default value is None.
        :paramtype add_person_ids: list[str]
        :keyword remove_person_ids: Array of personIds created by Person Directory "Create Person"
         to be removed. Default value is None.
        :paramtype remove_person_ids: list[str]
        :return: An instance of AsyncLROPoller that returns None
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[None] = kwargs.pop("cls", None)
        # polling may be True (default LRO poller), False (no polling), or a custom method.
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        # A continuation token lets the caller rehydrate a poller for an already-started operation.
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # No token: issue the initial update request now; the _initial helper
            # builds the payload and captures the operation-Location header.
            raw_result = await self._update_dynamic_person_group_with_person_changes_initial(  # type: ignore
                dynamic_person_group_id=dynamic_person_group_id,
                body=body,
                name=name,
                user_data=user_data,
                add_person_ids=add_person_ids,
                remove_person_ids=remove_person_ids,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # The operation produces no body; only invoke the caller-supplied hook, if any.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }

        if polling is True:
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod,
                AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
update_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, dynamic_person_group_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + + @overload + async def update_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, + dynamic_person_group_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. 
Length should not exceed 16K. Default value is + None. + :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, dynamic_person_group_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_dynamic_person_group( # pylint: disable=inconsistent-return-statements + self, + dynamic_person_group_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + user_data: Optional[str] = None, + **kwargs: Any + ) -> None: + """Update the name or userData of an existing Dynamic Person Group, and manage its members by + adding or removing persons. + + The properties keep unchanged if they are not in request body. + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: User defined name, maximum length is 128. Default value is None. + :paramtype name: str + :keyword user_data: Optional user defined data. Length should not exceed 16K. Default value is + None. 
+ :paramtype user_data: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str", # Optional. User defined name, maximum length is 128. + "userData": "str" # Optional. Optional user defined data. Length should not + exceed 16K. + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"name": name, "userdata": user_data} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_administration_update_dynamic_person_group_request( + dynamic_person_group_id=dynamic_person_group_id, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_dynamic_person_groups( + self, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.DynamicPersonGroup]: + """List all existing Dynamic Person Groups by dynamicPersonGroupId along with name and userData. + + Dynamic Person Groups are stored in alphabetical order of dynamicPersonGroupId. + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. 
+ :paramtype top: int + :return: list of DynamicPersonGroup + :rtype: list[~azure.ai.vision.face.models.DynamicPersonGroup] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "dynamicPersonGroupId": "str", # ID of the dynamic person group. + Required. + "name": "str", # User defined name, maximum length is 128. Required. + "userData": "str" # Optional. Optional user defined data. Length + should not exceed 16K. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.DynamicPersonGroup]] = kwargs.pop("cls", None) + + _request = build_face_administration_get_dynamic_person_groups_request( + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) 
+ + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.DynamicPersonGroup], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_dynamic_person_group_persons( + self, dynamic_person_group_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> _models.ListPersonResult: + """List all persons in the specified Dynamic Person Group. + + Persons are stored in alphabetical order of personId created in Person Directory "Create + Person". + > + * + + + * "start" parameter (string, optional) specifies an ID value from which returned entries will + have larger IDs based on string comparison. Setting "start" to an empty value indicates that + entries should be returned starting from the first item. + * "top" parameter (int, optional) determines the maximum number of entries to be returned, with + a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, + specify "start" with the personId of the last entry returned in the current call. + + .. + + [!TIP] + + + * For example, there are total 5 items with their IDs: "itemId1", ..., "itemId5". + + * "start=&top=" will return all 5 items. + * "start=&top=2" will return "itemId1", "itemId2". + * "start=itemId2&top=3" will return "itemId3", "itemId4", "itemId5". + + :param dynamic_person_group_id: ID of the dynamic person group. Required. + :type dynamic_person_group_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: ListPersonResult. 
The ListPersonResult is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.ListPersonResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "personIds": [ + "str" # Array of PersonDirectory Person ids. Required. + ] + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListPersonResult] = kwargs.pop("cls", None) + + _request = build_face_administration_get_dynamic_person_group_persons_request( + dynamic_person_group_id=dynamic_person_group_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListPersonResult, response.json()) 
+ + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class FaceSessionClientOperationsMixin(FaceSessionClientMixinABC): + + @overload + async def create_liveness_session( + self, body: _models.CreateLivenessSessionContent, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessSessionResult: + # pylint: disable=line-too-long + """Create a new detect liveness session. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLiveness/singleModal for up to 3 retries. + * A token lifetime of 10 minutes. + + .. + + [!NOTE] + Client access can be revoked by deleting the session using the Delete Liveness Session + operation. To retrieve a result, use the Get Liveness Session. To audit the individual requests + that a client has made to your resource, use the List Liveness Session Audit Entries. + + :param body: Required. + :type body: ~azure.ai.vision.face.models.CreateLivenessSessionContent + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: CreateLivenessSessionResult. The CreateLivenessSessionResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessSessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "livenessOperationMode": "str", # Type of liveness mode the client should + follow. Required. "Passive" + "authTokenTimeToLiveInSeconds": 0, # Optional. 
Seconds the session should + last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each end-user + device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "deviceCorrelationIdSetInClient": bool, # Optional. Whether or not to allow + client to set their own 'deviceCorrelationId' via the Vision SDK. Default is + false, and 'deviceCorrelationId' must be set in this request body. + "sendResultsToClient": bool # Optional. Whether or not to allow a '200 - + Success' response body to be sent to the client, which may be undesirable for + security reasons. Default is false, clients will receive a '204 - NoContent' + empty body response. Regardless of selection, calling Session GetResult will + always contain a response body enabling business logic to be implemented. + } + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str" # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. + } + """ + + @overload + async def create_liveness_session( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessSessionResult: + # pylint: disable=line-too-long + """Create a new detect liveness session. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. 
+ Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLiveness/singleModal for up to 3 retries. + * A token lifetime of 10 minutes. + + .. + + [!NOTE] + Client access can be revoked by deleting the session using the Delete Liveness Session + operation. To retrieve a result, use the Get Liveness Session. To audit the individual requests + that a client has made to your resource, use the List Liveness Session Audit Entries. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: CreateLivenessSessionResult. The CreateLivenessSessionResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessSessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str" # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. + } + """ + + @overload + async def create_liveness_session( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessSessionResult: + # pylint: disable=line-too-long + """Create a new detect liveness session. 
+ + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLiveness/singleModal for up to 3 retries. + * A token lifetime of 10 minutes. + + .. + + [!NOTE] + Client access can be revoked by deleting the session using the Delete Liveness Session + operation. To retrieve a result, use the Get Liveness Session. To audit the individual requests + that a client has made to your resource, use the List Liveness Session Audit Entries. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: CreateLivenessSessionResult. The CreateLivenessSessionResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessSessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str" # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. 
+ } + """ + + @distributed_trace_async + async def create_liveness_session( + self, body: Union[_models.CreateLivenessSessionContent, JSON, IO[bytes]], **kwargs: Any + ) -> _models.CreateLivenessSessionResult: + # pylint: disable=line-too-long + """Create a new detect liveness session. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLiveness/singleModal for up to 3 retries. + * A token lifetime of 10 minutes. + + .. + + [!NOTE] + Client access can be revoked by deleting the session using the Delete Liveness Session + operation. To retrieve a result, use the Get Liveness Session. To audit the individual requests + that a client has made to your resource, use the List Liveness Session Audit Entries. + + :param body: Is one of the following types: CreateLivenessSessionContent, JSON, IO[bytes] + Required. + :type body: ~azure.ai.vision.face.models.CreateLivenessSessionContent or JSON or IO[bytes] + :return: CreateLivenessSessionResult. The CreateLivenessSessionResult is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessSessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "livenessOperationMode": "str", # Type of liveness mode the client should + follow. Required. "Passive" + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session should + last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each end-user + device. This is to provide rate limiting and anti-hammering. 
If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "deviceCorrelationIdSetInClient": bool, # Optional. Whether or not to allow + client to set their own 'deviceCorrelationId' via the Vision SDK. Default is + false, and 'deviceCorrelationId' must be set in this request body. + "sendResultsToClient": bool # Optional. Whether or not to allow a '200 - + Success' response body to be sent to the client, which may be undesirable for + security reasons. Default is false, clients will receive a '204 - NoContent' + empty body response. Regardless of selection, calling Session GetResult will + always contain a response body enabling business logic to be implemented. + } + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str" # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.CreateLivenessSessionResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_session_create_liveness_session_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.CreateLivenessSessionResult, response.json()) + + if cls: + 
return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_liveness_session( # pylint: disable=inconsistent-return-statements + self, session_id: str, **kwargs: Any + ) -> None: + """Delete all session related information for matching the specified session id. + + .. + + [!NOTE] + Deleting a session deactivates the Session Auth Token by blocking future API calls made with + that Auth Token. While this can be used to remove any access for that token, those requests + will still count towards overall resource rate limits. It's best to leverage TokenTTL to limit + length of tokens in the case that it is misused. + + :param session_id: The unique ID to reference this session. Required. + :type session_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_session_delete_liveness_session_request( + session_id=session_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in 
[200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_liveness_session_result(self, session_id: str, **kwargs: Any) -> _models.LivenessSession: + # pylint: disable=line-too-long + """Get session result of detectLiveness/singleModal call. + + :param session_id: The unique ID to reference this session. Required. + :type session_id: str + :return: LivenessSession. The LivenessSession is compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.LivenessSession + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "createdDateTime": "2020-02-20 00:00:00", # DateTime when this session was + created. Required. + "id": "str", # The unique ID to reference this session. Required. + "sessionExpired": bool, # Whether or not the session is expired. Required. + "status": "str", # The current status of the session. Required. Known values + are: "NotStarted", "Started", and "ResultAvailable". + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session should + last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each end-user + device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "result": { + "clientRequestId": "str", # The unique clientRequestId that is sent + by the client in the 'client-request-id' header. Required. + "digest": "str", # The server calculated digest for this request. 
If + the client reported digest differs from the server calculated digest, then + the message integrity between the client and service has been compromised and + the result should not be trusted. For more information, see how to guides on + how to leverage this value to secure your end-to-end solution. Required. + "id": 0, # The unique id to refer to this audit request. Use this id + with the 'start' query parameter to continue on to the next page of audit + results. Required. + "receivedDateTime": "2020-02-20 00:00:00", # The UTC DateTime that + the request was received. Required. + "request": { + "contentType": "str", # The content type of the request. + Required. + "method": "str", # The HTTP method of the request (i.e., + GET, POST, DELETE). Required. + "url": "str", # The relative URL and query of the liveness + request. Required. + "contentLength": 0, # Optional. The length of the request + body in bytes. + "userAgent": "str" # Optional. The user agent used to submit + the request. + }, + "requestId": "str", # The unique requestId that is returned by the + service to the client in the 'apim-request-id' header. Required. + "response": { + "body": { + "livenessDecision": "str", # Optional. The liveness + classification for the target face. Known values are: "uncertain", + "realface", and "spoofface". + "modelVersionUsed": "str", # Optional. The model + version used for liveness classification. Known values are: + "2020-02-15-preview.01", "2021-11-12-preview.03", + "2022-10-15-preview.04", and "2023-03-02-preview.05". + "target": { + "faceRectangle": { + "height": 0, # The height of the + rectangle, in pixels. Required. + "left": 0, # The distance from the + left edge if the image to the left edge of the rectangle, in + pixels. Required. + "top": 0, # The distance from the + top edge if the image to the top edge of the rectangle, in + pixels. Required. + "width": 0 # The width of the + rectangle, in pixels. Required. 
+ }, + "fileName": "str", # The file name which + contains the face rectangle where the liveness classification was + made on. Required. + "imageType": "str", # The image type which + contains the face rectangle where the liveness classification was + made on. Required. Known values are: "Color", "Infrared", and + "Depth". + "timeOffsetWithinFile": 0 # The time offset + within the file of the frame which contains the face rectangle + where the liveness classification was made on. Required. + }, + "verifyResult": { + "isIdentical": bool, # Whether the target + liveness face and comparison image face match. Required. + "matchConfidence": 0.0, # The target face + liveness face and comparison image face verification confidence. + Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of + the rectangle, in pixels. Required. + "left": 0, # The distance + from the left edge if the image to the left edge of the + rectangle, in pixels. Required. + "top": 0, # The distance + from the top edge if the image to the top edge of the + rectangle, in pixels. Required. + "width": 0 # The width of + the rectangle, in pixels. Required. + }, + "qualityForRecognition": "str" # + Quality of face image for recognition. Required. Known values + are: "low", "medium", and "high". + } + } + }, + "latencyInMilliseconds": 0, # The server measured latency + for this request in milliseconds. Required. + "statusCode": 0 # The HTTP status code returned to the + client. Required. + }, + "sessionId": "str" # The unique sessionId of the created session. It + will expire 48 hours after it was created or may be deleted sooner using the + corresponding session DELETE operation. Required. + }, + "sessionStartDateTime": "2020-02-20 00:00:00" # Optional. DateTime when this + session was started by the client. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LivenessSession] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_session_result_request( + session_id=session_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LivenessSession, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_liveness_sessions( + self, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.LivenessSessionItem]: + # pylint: disable=line-too-long + """Lists sessions for /detectLiveness/SingleModal. 
+ + List sessions from the last sessionId greater than the 'start'. + + The result should be ordered by sessionId in ascending order. + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of LivenessSessionItem + :rtype: list[~azure.ai.vision.face.models.LivenessSessionItem] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "createdDateTime": "2020-02-20 00:00:00", # DateTime when this + session was created. Required. + "id": "str", # The unique ID to reference this session. Required. + "sessionExpired": bool, # Whether or not the session is expired. + Required. + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session + should last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each + end-user device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "sessionStartDateTime": "2020-02-20 00:00:00" # Optional. DateTime + when this session was started by the client. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LivenessSessionItem]] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_sessions_request( + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LivenessSessionItem], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_liveness_session_audit_entries( + self, session_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.LivenessSessionAuditEntry]: + # pylint: disable=line-too-long + """Gets 
session requests and response body for the session. + + :param session_id: The unique ID to reference this session. Required. + :type session_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of LivenessSessionAuditEntry + :rtype: list[~azure.ai.vision.face.models.LivenessSessionAuditEntry] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "clientRequestId": "str", # The unique clientRequestId that is sent + by the client in the 'client-request-id' header. Required. + "digest": "str", # The server calculated digest for this request. If + the client reported digest differs from the server calculated digest, then + the message integrity between the client and service has been compromised and + the result should not be trusted. For more information, see how to guides on + how to leverage this value to secure your end-to-end solution. Required. + "id": 0, # The unique id to refer to this audit request. Use this id + with the 'start' query parameter to continue on to the next page of audit + results. Required. + "receivedDateTime": "2020-02-20 00:00:00", # The UTC DateTime that + the request was received. Required. + "request": { + "contentType": "str", # The content type of the request. + Required. + "method": "str", # The HTTP method of the request (i.e., + GET, POST, DELETE). Required. + "url": "str", # The relative URL and query of the liveness + request. Required. + "contentLength": 0, # Optional. The length of the request + body in bytes. + "userAgent": "str" # Optional. The user agent used to submit + the request. 
+ }, + "requestId": "str", # The unique requestId that is returned by the + service to the client in the 'apim-request-id' header. Required. + "response": { + "body": { + "livenessDecision": "str", # Optional. The liveness + classification for the target face. Known values are: "uncertain", + "realface", and "spoofface". + "modelVersionUsed": "str", # Optional. The model + version used for liveness classification. Known values are: + "2020-02-15-preview.01", "2021-11-12-preview.03", + "2022-10-15-preview.04", and "2023-03-02-preview.05". + "target": { + "faceRectangle": { + "height": 0, # The height of the + rectangle, in pixels. Required. + "left": 0, # The distance from the + left edge if the image to the left edge of the rectangle, in + pixels. Required. + "top": 0, # The distance from the + top edge if the image to the top edge of the rectangle, in + pixels. Required. + "width": 0 # The width of the + rectangle, in pixels. Required. + }, + "fileName": "str", # The file name which + contains the face rectangle where the liveness classification was + made on. Required. + "imageType": "str", # The image type which + contains the face rectangle where the liveness classification was + made on. Required. Known values are: "Color", "Infrared", and + "Depth". + "timeOffsetWithinFile": 0 # The time offset + within the file of the frame which contains the face rectangle + where the liveness classification was made on. Required. + }, + "verifyResult": { + "isIdentical": bool, # Whether the target + liveness face and comparison image face match. Required. + "matchConfidence": 0.0, # The target face + liveness face and comparison image face verification confidence. + Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of + the rectangle, in pixels. Required. + "left": 0, # The distance + from the left edge if the image to the left edge of the + rectangle, in pixels. Required. 
+ "top": 0, # The distance + from the top edge if the image to the top edge of the + rectangle, in pixels. Required. + "width": 0 # The width of + the rectangle, in pixels. Required. + }, + "qualityForRecognition": "str" # + Quality of face image for recognition. Required. Known values + are: "low", "medium", and "high". + } + } + }, + "latencyInMilliseconds": 0, # The server measured latency + for this request in milliseconds. Required. + "statusCode": 0 # The HTTP status code returned to the + client. Required. + }, + "sessionId": "str" # The unique sessionId of the created session. It + will expire 48 hours after it was created or may be deleted sooner using the + corresponding session DELETE operation. Required. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LivenessSessionAuditEntry]] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_session_audit_entries_request( + session_id=session_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in 
memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LivenessSessionAuditEntry], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def _create_liveness_with_verify_session( + self, body: _models.CreateLivenessSessionContent, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: ... + @overload + async def _create_liveness_with_verify_session( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: ... + @overload + async def _create_liveness_with_verify_session( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: ... + + @distributed_trace_async + async def _create_liveness_with_verify_session( + self, body: Union[_models.CreateLivenessSessionContent, JSON, IO[bytes]], **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: + # pylint: disable=line-too-long + """Create a new liveness session with verify. Client device submits VerifyImage during the + /detectLivenessWithVerify/singleModal call. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLivenessWithVerify/singleModal for up to 3 retries. 
+ * A token lifetime of 10 minutes. + + .. + + [!NOTE] + + * + + + * Client access can be revoked by deleting the session using the Delete Liveness With Verify + Session operation. + * To retrieve a result, use the Get Liveness With Verify Session. + * To audit the individual requests that a client has made to your resource, use the List + Liveness With Verify Session Audit Entries. + + + Alternative Option: Client device submits VerifyImage during the + /detectLivenessWithVerify/singleModal call. + + .. + + [!NOTE] + Extra measures should be taken to validate that the client is sending the expected + VerifyImage. + + :param body: Is one of the following types: CreateLivenessSessionContent, JSON, IO[bytes] + Required. + :type body: ~azure.ai.vision.face.models.CreateLivenessSessionContent or JSON or IO[bytes] + :return: CreateLivenessWithVerifySessionResult. The CreateLivenessWithVerifySessionResult is + compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessWithVerifySessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "livenessOperationMode": "str", # Type of liveness mode the client should + follow. Required. "Passive" + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session should + last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each end-user + device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "deviceCorrelationIdSetInClient": bool, # Optional. Whether or not to allow + client to set their own 'deviceCorrelationId' via the Vision SDK. Default is + false, and 'deviceCorrelationId' must be set in this request body. + "sendResultsToClient": bool # Optional. 
Whether or not to allow a '200 - + Success' response body to be sent to the client, which may be undesirable for + security reasons. Default is false, clients will receive a '204 - NoContent' + empty body response. Regardless of selection, calling Session GetResult will + always contain a response body enabling business logic to be implemented. + } + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str", # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of the rectangle, in pixels. + Required. + "left": 0, # The distance from the left edge if the image to + the left edge of the rectangle, in pixels. Required. + "top": 0, # The distance from the top edge if the image to + the top edge of the rectangle, in pixels. Required. + "width": 0 # The width of the rectangle, in pixels. + Required. + }, + "qualityForRecognition": "str" # Quality of face image for + recognition. Required. Known values are: "low", "medium", and "high". 
+ } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.CreateLivenessWithVerifySessionResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_face_session_create_liveness_with_verify_session_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(_models.CreateLivenessWithVerifySessionResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def _create_liveness_with_verify_session_with_verify_image( # pylint: disable=protected-access,name-too-long + self, body: _models._models.CreateLivenessWithVerifySessionContent, **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: ... + @overload + async def _create_liveness_with_verify_session_with_verify_image( # pylint: disable=name-too-long + self, body: JSON, **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: ... + + @distributed_trace_async + async def _create_liveness_with_verify_session_with_verify_image( # pylint: disable=name-too-long + self, body: Union[_models._models.CreateLivenessWithVerifySessionContent, JSON], **kwargs: Any + ) -> _models.CreateLivenessWithVerifySessionResult: + # pylint: disable=line-too-long + """Create a new liveness session with verify. Provide the verify image during session creation. + + A session is best for client device scenarios where developers want to authorize a client + device to perform only a liveness detection without granting full access to their resource. + Created sessions have a limited life span and only authorize clients to perform the desired + action before access is expired. + + Permissions includes... + > + * + + + * Ability to call /detectLivenessWithVerify/singleModal for up to 3 retries. + * A token lifetime of 10 minutes. + + .. + + [!NOTE] + + * + + + * Client access can be revoked by deleting the session using the Delete Liveness With Verify + Session operation. + * To retrieve a result, use the Get Liveness With Verify Session. + * To audit the individual requests that a client has made to your resource, use the List + Liveness With Verify Session Audit Entries. + + + Recommended Option: VerifyImage is provided during session creation. 
+ + :param body: Is either a CreateLivenessWithVerifySessionContent type or a JSON type. Required. + :type body: ~azure.ai.vision.face.models._models.CreateLivenessWithVerifySessionContent or JSON + :return: CreateLivenessWithVerifySessionResult. The CreateLivenessWithVerifySessionResult is + compatible with MutableMapping + :rtype: ~azure.ai.vision.face.models.CreateLivenessWithVerifySessionResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "Parameters": { + "livenessOperationMode": "str", # Type of liveness mode the client + should follow. Required. "Passive" + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session + should last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each + end-user device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "deviceCorrelationIdSetInClient": bool, # Optional. Whether or not + to allow client to set their own 'deviceCorrelationId' via the Vision SDK. + Default is false, and 'deviceCorrelationId' must be set in this request body. + "sendResultsToClient": bool # Optional. Whether or not to allow a + '200 - Success' response body to be sent to the client, which may be + undesirable for security reasons. Default is false, clients will receive a + '204 - NoContent' empty body response. Regardless of selection, calling + Session GetResult will always contain a response body enabling business logic + to be implemented. + }, + "VerifyImage": filetype + } + + # response body for status code(s): 200 + response == { + "authToken": "str", # Bearer token to provide authentication for the Vision + SDK running on a client application. 
This Bearer token has limited permissions to + perform only the required action and expires after the TTL time. It is also + auditable. Required. + "sessionId": "str", # The unique session ID of the created session. It will + expire 48 hours after it was created or may be deleted sooner using the + corresponding Session DELETE operation. Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of the rectangle, in pixels. + Required. + "left": 0, # The distance from the left edge if the image to + the left edge of the rectangle, in pixels. Required. + "top": 0, # The distance from the top edge if the image to + the top edge of the rectangle, in pixels. Required. + "width": 0 # The width of the rectangle, in pixels. + Required. + }, + "qualityForRecognition": "str" # Quality of face image for + recognition. Required. Known values are: "low", "medium", and "high". + } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.CreateLivenessWithVerifySessionResult] = kwargs.pop("cls", None) + + _body = body.as_dict() if isinstance(body, _model_base.Model) else body + _file_fields: List[str] = ["VerifyImage"] + _data_fields: List[str] = ["Parameters"] + _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) + + _request = build_face_session_create_liveness_with_verify_session_with_verify_image_request( + files=_files, + data=_data, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url 
= self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.CreateLivenessWithVerifySessionResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_liveness_with_verify_session( # pylint: disable=inconsistent-return-statements + self, session_id: str, **kwargs: Any + ) -> None: + """Delete all session related information for matching the specified session id. + + .. + + [!NOTE] + Deleting a session deactivates the Session Auth Token by blocking future API calls made with + that Auth Token. While this can be used to remove any access for that token, those requests + will still count towards overall resource rate limits. It's best to leverage TokenTTL to limit + length of tokens in the case that it is misused. + + :param session_id: The unique ID to reference this session. Required. 
+ :type session_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_face_session_delete_liveness_with_verify_session_request( + session_id=session_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_liveness_with_verify_session_result( + self, session_id: str, **kwargs: Any + ) -> _models.LivenessWithVerifySession: + # pylint: disable=line-too-long + """Get session result of detectLivenessWithVerify/singleModal call. + + :param session_id: The unique ID to reference this session. Required. 
+ :type session_id: str + :return: LivenessWithVerifySession. The LivenessWithVerifySession is compatible with + MutableMapping + :rtype: ~azure.ai.vision.face.models.LivenessWithVerifySession + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "createdDateTime": "2020-02-20 00:00:00", # DateTime when this session was + created. Required. + "id": "str", # The unique ID to reference this session. Required. + "sessionExpired": bool, # Whether or not the session is expired. Required. + "status": "str", # The current status of the session. Required. Known values + are: "NotStarted", "Started", and "ResultAvailable". + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session should + last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each end-user + device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "result": { + "clientRequestId": "str", # The unique clientRequestId that is sent + by the client in the 'client-request-id' header. Required. + "digest": "str", # The server calculated digest for this request. If + the client reported digest differs from the server calculated digest, then + the message integrity between the client and service has been compromised and + the result should not be trusted. For more information, see how to guides on + how to leverage this value to secure your end-to-end solution. Required. + "id": 0, # The unique id to refer to this audit request. Use this id + with the 'start' query parameter to continue on to the next page of audit + results. Required. + "receivedDateTime": "2020-02-20 00:00:00", # The UTC DateTime that + the request was received. Required. + "request": { + "contentType": "str", # The content type of the request. + Required. 
+ "method": "str", # The HTTP method of the request (i.e., + GET, POST, DELETE). Required. + "url": "str", # The relative URL and query of the liveness + request. Required. + "contentLength": 0, # Optional. The length of the request + body in bytes. + "userAgent": "str" # Optional. The user agent used to submit + the request. + }, + "requestId": "str", # The unique requestId that is returned by the + service to the client in the 'apim-request-id' header. Required. + "response": { + "body": { + "livenessDecision": "str", # Optional. The liveness + classification for the target face. Known values are: "uncertain", + "realface", and "spoofface". + "modelVersionUsed": "str", # Optional. The model + version used for liveness classification. Known values are: + "2020-02-15-preview.01", "2021-11-12-preview.03", + "2022-10-15-preview.04", and "2023-03-02-preview.05". + "target": { + "faceRectangle": { + "height": 0, # The height of the + rectangle, in pixels. Required. + "left": 0, # The distance from the + left edge if the image to the left edge of the rectangle, in + pixels. Required. + "top": 0, # The distance from the + top edge if the image to the top edge of the rectangle, in + pixels. Required. + "width": 0 # The width of the + rectangle, in pixels. Required. + }, + "fileName": "str", # The file name which + contains the face rectangle where the liveness classification was + made on. Required. + "imageType": "str", # The image type which + contains the face rectangle where the liveness classification was + made on. Required. Known values are: "Color", "Infrared", and + "Depth". + "timeOffsetWithinFile": 0 # The time offset + within the file of the frame which contains the face rectangle + where the liveness classification was made on. Required. + }, + "verifyResult": { + "isIdentical": bool, # Whether the target + liveness face and comparison image face match. Required. 
+ "matchConfidence": 0.0, # The target face + liveness face and comparison image face verification confidence. + Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of + the rectangle, in pixels. Required. + "left": 0, # The distance + from the left edge if the image to the left edge of the + rectangle, in pixels. Required. + "top": 0, # The distance + from the top edge if the image to the top edge of the + rectangle, in pixels. Required. + "width": 0 # The width of + the rectangle, in pixels. Required. + }, + "qualityForRecognition": "str" # + Quality of face image for recognition. Required. Known values + are: "low", "medium", and "high". + } + } + }, + "latencyInMilliseconds": 0, # The server measured latency + for this request in milliseconds. Required. + "statusCode": 0 # The HTTP status code returned to the + client. Required. + }, + "sessionId": "str" # The unique sessionId of the created session. It + will expire 48 hours after it was created or may be deleted sooner using the + corresponding session DELETE operation. Required. + }, + "sessionStartDateTime": "2020-02-20 00:00:00" # Optional. DateTime when this + session was started by the client. 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.LivenessWithVerifySession] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_with_verify_session_result_request( + session_id=session_id, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LivenessWithVerifySession, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_liveness_with_verify_sessions( + self, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> List[_models.LivenessSessionItem]: + # pylint: disable=line-too-long + """Lists sessions for 
/detectLivenessWithVerify/SingleModal. + + List sessions from the last sessionId greater than the "start". + + The result should be ordered by sessionId in ascending order. + + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of LivenessSessionItem + :rtype: list[~azure.ai.vision.face.models.LivenessSessionItem] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "createdDateTime": "2020-02-20 00:00:00", # DateTime when this + session was created. Required. + "id": "str", # The unique ID to reference this session. Required. + "sessionExpired": bool, # Whether or not the session is expired. + Required. + "authTokenTimeToLiveInSeconds": 0, # Optional. Seconds the session + should last for. Range is 60 to 86400 seconds. Default value is 600. + "deviceCorrelationId": "str", # Optional. Unique Guid per each + end-user device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + "sessionStartDateTime": "2020-02-20 00:00:00" # Optional. DateTime + when this session was started by the client. 
+ } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LivenessSessionItem]] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_with_verify_sessions_request( + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LivenessSessionItem], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_liveness_with_verify_session_audit_entries( # pylint: disable=name-too-long + self, session_id: str, *, start: Optional[str] = None, top: Optional[int] = None, **kwargs: Any + ) -> 
List[_models.LivenessSessionAuditEntry]: + # pylint: disable=line-too-long + """Gets session requests and response body for the session. + + :param session_id: The unique ID to reference this session. Required. + :type session_id: str + :keyword start: List resources greater than the "start". It contains no more than 64 + characters. Default is empty. Default value is None. + :paramtype start: str + :keyword top: The number of items to list, ranging in [1, 1000]. Default is 1000. Default value + is None. + :paramtype top: int + :return: list of LivenessSessionAuditEntry + :rtype: list[~azure.ai.vision.face.models.LivenessSessionAuditEntry] + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == [ + { + "clientRequestId": "str", # The unique clientRequestId that is sent + by the client in the 'client-request-id' header. Required. + "digest": "str", # The server calculated digest for this request. If + the client reported digest differs from the server calculated digest, then + the message integrity between the client and service has been compromised and + the result should not be trusted. For more information, see how to guides on + how to leverage this value to secure your end-to-end solution. Required. + "id": 0, # The unique id to refer to this audit request. Use this id + with the 'start' query parameter to continue on to the next page of audit + results. Required. + "receivedDateTime": "2020-02-20 00:00:00", # The UTC DateTime that + the request was received. Required. + "request": { + "contentType": "str", # The content type of the request. + Required. + "method": "str", # The HTTP method of the request (i.e., + GET, POST, DELETE). Required. + "url": "str", # The relative URL and query of the liveness + request. Required. + "contentLength": 0, # Optional. The length of the request + body in bytes. + "userAgent": "str" # Optional. 
The user agent used to submit + the request. + }, + "requestId": "str", # The unique requestId that is returned by the + service to the client in the 'apim-request-id' header. + "response": { + "body": { + "livenessDecision": "str", # Optional. The liveness + classification for the target face. Known values are: "uncertain", + "realface", and "spoofface". + "modelVersionUsed": "str", # Optional. The model + version used for liveness classification. Known values are: + "2020-02-15-preview.01", "2021-11-12-preview.03", + "2022-10-15-preview.04", and "2023-03-02-preview.05". + "target": { + "faceRectangle": { + "height": 0, # The height of the + rectangle, in pixels. Required. + "left": 0, # The distance from the + left edge of the image to the left edge of the rectangle, in + pixels. Required. + "top": 0, # The distance from the + top edge of the image to the top edge of the rectangle, in + pixels. Required. + "width": 0 # The width of the + rectangle, in pixels. Required. + }, + "fileName": "str", # The file name which + contains the face rectangle where the liveness classification was + made on. Required. + "imageType": "str", # The image type which + contains the face rectangle where the liveness classification was + made on. Required. Known values are: "Color", "Infrared", and + "Depth". + "timeOffsetWithinFile": 0 # The time offset + within the file of the frame which contains the face rectangle + where the liveness classification was made on. Required. + }, + "verifyResult": { + "isIdentical": bool, # Whether the target + liveness face and comparison image face match. Required. + "matchConfidence": 0.0, # The target face + liveness face and comparison image face verification confidence. + Required. + "verifyImage": { + "faceRectangle": { + "height": 0, # The height of + the rectangle, in pixels. Required. + "left": 0, # The distance + from the left edge of the image to the left edge of the + rectangle, in pixels. Required. 
+ "top": 0, # The distance + from the top edge of the image to the top edge of the + rectangle, in pixels. Required. + "width": 0 # The width of + the rectangle, in pixels. Required. + }, + "qualityForRecognition": "str" # + Quality of face image for recognition. Required. Known values + are: "low", "medium", and "high". + } + } + }, + "latencyInMilliseconds": 0, # The server measured latency + for this request in milliseconds. Required. + "statusCode": 0 # The HTTP status code returned to the + client. Required. + }, + "sessionId": "str" # The unique sessionId of the created session. It + will expire 48 hours after it was created or may be deleted sooner using the + corresponding session DELETE operation. Required. + } + ] + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.LivenessSessionAuditEntry]] = kwargs.pop("cls", None) + + _request = build_face_session_get_liveness_with_verify_session_audit_entries_request( + session_id=session_id, + start=start, + top=top, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load 
the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.FaceErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(List[_models.LivenessSessionAuditEntry], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/_patch.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_patch.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_vendor.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_vendor.py new file mode 100644 index 000000000000..9f2bd2f22400 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/aio/_vendor.py @@ -0,0 +1,50 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +import json +from typing import Any, Dict, IO, List, Mapping, Optional, Sequence, TYPE_CHECKING, Tuple, Union + +from ._configuration import ( + FaceAdministrationClientConfiguration, + FaceClientConfiguration, + FaceSessionClientConfiguration, +) +from ._model_base import Model, SdkJSONEncoder + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core import AsyncPipelineClient + + from .._serialization import Deserializer, Serializer + + +class FaceClientMixinABC(ABC): + """DO NOT use this class. 
It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: FaceClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +class FaceAdministrationClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: FaceAdministrationClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +class FaceSessionClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: FaceSessionClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/__init__.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/__init__.py new file mode 100644 index 000000000000..dcbd91525083 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/__init__.py @@ -0,0 +1,167 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._models import AccessoryItem +from ._models import AddFaceResult +from ._models import AuditLivenessResponseInfo +from ._models import AuditRequestInfo +from ._models import BlurProperties +from ._models import CreateLivenessSessionContent +from ._models import CreateLivenessSessionResult +from ._models import CreateLivenessWithVerifySessionResult +from ._models import CreatePersonResult +from ._models import DynamicPersonGroup +from ._models import ExposureProperties +from ._models import FaceAttributes +from ._models import FaceCollectionTrainingResult +from ._models import FaceDetectionResult +from ._models import FaceError +from ._models import FaceErrorResponse +from ._models import FaceFindSimilarResult +from ._models import FaceGroupingResult +from ._models import FaceIdentificationCandidate +from ._models import FaceIdentificationResult +from ._models import FaceLandmarks +from ._models import FaceList +from ._models import FaceListFace +from ._models import FaceListItem +from ._models import FaceOperationResult +from ._models import FaceRectangle +from ._models import FaceVerificationResult +from ._models import FacialHair +from ._models import HairColor +from ._models import HairProperties +from ._models import HeadPose +from ._models import LandmarkCoordinate +from ._models import LargeFaceList +from ._models import LargeFaceListFace +from ._models import LargePersonGroup +from ._models import LargePersonGroupPerson +from ._models import LargePersonGroupPersonFace +from ._models import ListFaceResult +from ._models import ListGroupReferenceResult +from ._models import ListPersonResult +from ._models import LivenessOutputsTarget +from ._models import LivenessResponseBody +from ._models import LivenessSession +from ._models import LivenessSessionAuditEntry +from ._models import LivenessSessionItem +from ._models import LivenessWithVerifyImage +from ._models import 
LivenessWithVerifyOutputs +from ._models import LivenessWithVerifySession +from ._models import MaskProperties +from ._models import NoiseProperties +from ._models import OcclusionProperties +from ._models import PersonDirectoryFace +from ._models import PersonDirectoryPerson +from ._models import PersonGroup +from ._models import PersonGroupPerson +from ._models import PersonGroupPersonFace + +from ._enums import AccessoryType +from ._enums import BlurLevel +from ._enums import ExposureLevel +from ._enums import FaceAttributeType +from ._enums import FaceDetectionModel +from ._enums import FaceImageType +from ._enums import FaceLivenessDecision +from ._enums import FaceOperationStatus +from ._enums import FaceRecognitionModel +from ._enums import FaceSessionStatus +from ._enums import FindSimilarMatchMode +from ._enums import GlassesType +from ._enums import HairColorType +from ._enums import LivenessModel +from ._enums import LivenessOperationMode +from ._enums import MaskType +from ._enums import NoiseLevel +from ._enums import QualityForRecognition +from ._enums import Versions +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AccessoryItem", + "AddFaceResult", + "AuditLivenessResponseInfo", + "AuditRequestInfo", + "BlurProperties", + "CreateLivenessSessionContent", + "CreateLivenessSessionResult", + "CreateLivenessWithVerifySessionResult", + "CreatePersonResult", + "DynamicPersonGroup", + "ExposureProperties", + "FaceAttributes", + "FaceCollectionTrainingResult", + "FaceDetectionResult", + "FaceError", + "FaceErrorResponse", + "FaceFindSimilarResult", + "FaceGroupingResult", + "FaceIdentificationCandidate", + "FaceIdentificationResult", + "FaceLandmarks", + "FaceList", + "FaceListFace", + "FaceListItem", + "FaceOperationResult", + "FaceRectangle", + "FaceVerificationResult", + "FacialHair", + "HairColor", + "HairProperties", + "HeadPose", + 
"LandmarkCoordinate", + "LargeFaceList", + "LargeFaceListFace", + "LargePersonGroup", + "LargePersonGroupPerson", + "LargePersonGroupPersonFace", + "ListFaceResult", + "ListGroupReferenceResult", + "ListPersonResult", + "LivenessOutputsTarget", + "LivenessResponseBody", + "LivenessSession", + "LivenessSessionAuditEntry", + "LivenessSessionItem", + "LivenessWithVerifyImage", + "LivenessWithVerifyOutputs", + "LivenessWithVerifySession", + "MaskProperties", + "NoiseProperties", + "OcclusionProperties", + "PersonDirectoryFace", + "PersonDirectoryPerson", + "PersonGroup", + "PersonGroupPerson", + "PersonGroupPersonFace", + "AccessoryType", + "BlurLevel", + "ExposureLevel", + "FaceAttributeType", + "FaceDetectionModel", + "FaceImageType", + "FaceLivenessDecision", + "FaceOperationStatus", + "FaceRecognitionModel", + "FaceSessionStatus", + "FindSimilarMatchMode", + "GlassesType", + "HairColorType", + "LivenessModel", + "LivenessOperationMode", + "MaskType", + "NoiseLevel", + "QualityForRecognition", + "Versions", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_enums.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_enums.py new file mode 100644 index 000000000000..4ebb4c54d967 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_enums.py @@ -0,0 +1,258 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AccessoryType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of the accessory.""" + + HEADWEAR = "headwear" + """Head wear.""" + GLASSES = "glasses" + """Glasses.""" + MASK = "mask" + """Mask.""" + + +class BlurLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates level of blurriness.""" + + LOW = "low" + """Low blur level.""" + MEDIUM = "medium" + """Medium blur level.""" + HIGH = "high" + """High blur level.""" + + +class ExposureLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates level of exposure.""" + + UNDER_EXPOSURE = "underExposure" + """Low exposure level.""" + GOOD_EXPOSURE = "goodExposure" + """Good exposure level.""" + OVER_EXPOSURE = "overExposure" + """High exposure level.""" + + +class FaceAttributeType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Available options for detect face with attribute.""" + + HEAD_POSE = "headPose" + """3-D roll/yaw/pitch angles for face direction.""" + GLASSES = "glasses" + """Glasses type. Values include 'NoGlasses', 'ReadingGlasses', 'Sunglasses', 'SwimmingGoggles'.""" + OCCLUSION = "occlusion" + """Whether each facial area is occluded, including forehead, eyes and mouth.""" + ACCESSORIES = "accessories" + """Accessories around face, including 'headwear', 'glasses' and 'mask'. Empty array means no + accessories detected. Note this is after a face is detected. Large mask could result in no face + to be detected.""" + BLUR = "blur" + """Face is blurry or not. Level returns 'Low', 'Medium' or 'High'. Value returns a number between + [0,1], the larger the blurrier.""" + EXPOSURE = "exposure" + """Face exposure level. Level returns 'GoodExposure', 'OverExposure' or 'UnderExposure'.""" + NOISE = "noise" + """Noise level of face pixels. Level returns 'Low', 'Medium' and 'High'. 
Value returns a number + between [0,1], the larger the noisier""" + MASK = "mask" + """Whether each face is wearing a mask. Mask type returns 'noMask', 'faceMask', + 'otherMaskOrOcclusion', or 'uncertain'. Value returns a boolean 'noseAndMouthCovered' + indicating whether nose and mouth are covered.""" + QUALITY_FOR_RECOGNITION = "qualityForRecognition" + """The overall image quality regarding whether the image being used in the detection is of + sufficient quality to attempt face recognition on. The value is an informal rating of low, + medium, or high. Only 'high' quality images are recommended for person enrollment and quality + at or above 'medium' is recommended for identification scenarios. The attribute is only + available when using any combinations of detection models detection_01 or detection_03, and + recognition models recognition_03 or recognition_04.""" + AGE = "age" + """Age in years.""" + SMILE = "smile" + """Smile intensity, a number between [0,1].""" + FACIAL_HAIR = "facialHair" + """Properties describing facial hair attributes.""" + HAIR = "hair" + """Properties describing hair attributes.""" + + +class FaceDetectionModel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The detection model for the face.""" + + DETECTION_01 = "detection_01" + """The default detection model. Recommend for near frontal face detection. 
For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the + faces in such cases may not be detected.""" + DETECTION_02 = "detection_02" + """Detection model released in 2019 May with improved accuracy especially on small, side and + blurry faces.""" + DETECTION_03 = "detection_03" + """Detection model released in 2021 February with improved accuracy especially on small faces.""" + + +class FaceImageType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of image.""" + + COLOR = "Color" + INFRARED = "Infrared" + DEPTH = "Depth" + + +class FaceLivenessDecision(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The outcome of the liveness classification.""" + + UNCERTAIN = "uncertain" + """The algorithm could not classify the target face as either real or spoof.""" + REALFACE = "realface" + """The algorithm has classified the target face as real.""" + SPOOFFACE = "spoofface" + """The algorithm has classified the target face as a spoof.""" + + +class FaceOperationStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The status of long running operation.""" + + NOT_STARTED = "notStarted" + """The operation is not started.""" + RUNNING = "running" + """The operation is still running.""" + SUCCEEDED = "succeeded" + """The operation is succeeded.""" + FAILED = "failed" + """The operation is failed.""" + + +class FaceRecognitionModel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The recognition model for the face.""" + + RECOGNITION_01 = "recognition_01" + """The default recognition model for "Detect". All those faceIds created before 2019 March are + bonded with this recognition model.""" + RECOGNITION_02 = "recognition_02" + """Recognition model released in 2019 March.""" + RECOGNITION_03 = "recognition_03" + """Recognition model released in 2020 May.""" + RECOGNITION_04 = "recognition_04" + """Recognition model released in 2021 February. 
It's recommended to use this recognition model for + better recognition accuracy.""" + + +class FaceSessionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The current status of the session.""" + + NOT_STARTED = "NotStarted" + """Session has not started.""" + STARTED = "Started" + """Session has started.""" + RESULT_AVAILABLE = "ResultAvailable" + """Session has available result.""" + + +class FindSimilarMatchMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Similar face searching mode.""" + + MATCH_PERSON = "matchPerson" + """Match person.""" + MATCH_FACE = "matchFace" + """Match face.""" + + +class GlassesType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Glasses type of the face.""" + + NO_GLASSES = "noGlasses" + """No glasses on the face.""" + READING_GLASSES = "readingGlasses" + """Normal glasses on the face.""" + SUNGLASSES = "sunglasses" + """Sunglasses on the face.""" + SWIMMING_GOGGLES = "swimmingGoggles" + """Swimming goggles on the face.""" + + +class HairColorType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Name of the hair color.""" + + UNKNOWN_HAIR_COLOR = "unknown" + """Unknown.""" + WHITE = "white" + """White.""" + GRAY = "gray" + """Gray.""" + BLOND = "blond" + """Blond.""" + BROWN = "brown" + """Brown.""" + RED = "red" + """Red.""" + BLACK = "black" + """Black.""" + OTHER = "other" + """Other.""" + + +class LivenessModel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The model version used for liveness classification.""" + + ENUM_2020_02_15_PREVIEW_01 = "2020-02-15-preview.01" + ENUM_2021_11_12_PREVIEW_03 = "2021-11-12-preview.03" + ENUM_2022_10_15_PREVIEW_04 = "2022-10-15-preview.04" + ENUM_2023_03_02_PREVIEW_05 = "2023-03-02-preview.05" + + +class LivenessOperationMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The operation mode for the liveness modal.""" + + PASSIVE = "Passive" + """The operation mode for the liveness modal.""" + + +class MaskType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + 
"""Type of the mask.""" + + FACE_MASK = "faceMask" + """Face mask.""" + NO_MASK = "noMask" + """No mask.""" + OTHER_MASK_OR_OCCLUSION = "otherMaskOrOcclusion" + """Other types of mask or occlusion.""" + UNCERTAIN = "uncertain" + """Uncertain.""" + + +class NoiseLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates level of noise.""" + + LOW = "low" + """Low noise level.""" + MEDIUM = "medium" + """Medium noise level.""" + HIGH = "high" + """High noise level.""" + + +class QualityForRecognition(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates quality of image for recognition.""" + + LOW = "low" + """Low quality.""" + MEDIUM = "medium" + """Medium quality.""" + HIGH = "high" + """High quality.""" + + +class Versions(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """API versions for Azure AI Face API.""" + + V1_1_PREVIEW_1 = "v1.1-preview.1" + """v1.1-preview.1""" diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_models.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_models.py new file mode 100644 index 000000000000..c917c9209d29 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_models.py @@ -0,0 +1,2763 @@ +# coding=utf-8 +# pylint: disable=too-many-lines +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +import sys +from typing import Any, List, Mapping, Optional, TYPE_CHECKING, Union, overload + +from .. 
import _model_base +from .._model_base import rest_field +from .._vendor import FileType + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from .. import models as _models +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object + + +class AccessoryItem(_model_base.Model): + """Accessory item and corresponding confidence level. + + All required parameters must be populated in order to send to server. + + :ivar type: Type of the accessory. Required. Known values are: "headwear", "glasses", and + "mask". + :vartype type: str or ~azure.ai.vision.face.models.AccessoryType + :ivar confidence: Confidence level of the accessory type. Range between [0,1]. Required. + :vartype confidence: float + """ + + type: Union[str, "_models.AccessoryType"] = rest_field() + """Type of the accessory. Required. Known values are: \"headwear\", \"glasses\", and \"mask\".""" + confidence: float = rest_field() + """Confidence level of the accessory type. Range between [0,1]. Required.""" + + @overload + def __init__( + self, + *, + type: Union[str, "_models.AccessoryType"], + confidence: float, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class AddFaceResult(_model_base.Model): + """Response body for adding face. + + All required parameters must be populated in order to send to server. + + :ivar persisted_face_id: Persisted Face ID of the added face, which is persisted and will not + expire. Different from faceId which is created in "Detect" and will expire in 24 hours after + the detection call. Required. 
+ :vartype persisted_face_id: str + """ + + persisted_face_id: str = rest_field(name="persistedFaceId") + """Persisted Face ID of the added face, which is persisted and will not expire. Different from + faceId which is created in \"Detect\" and will expire in 24 hours after the detection call. + Required.""" + + @overload + def __init__( + self, + *, + persisted_face_id: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class AuditLivenessResponseInfo(_model_base.Model): + """Audit entry for a response in the session. + + All required parameters must be populated in order to send to server. + + :ivar body: The response body. The schema of this field will depend on the request.url and + request.method used by the client. Required. + :vartype body: ~azure.ai.vision.face.models.LivenessResponseBody + :ivar status_code: The HTTP status code returned to the client. Required. + :vartype status_code: int + :ivar latency_in_milliseconds: The server measured latency for this request in milliseconds. + Required. + :vartype latency_in_milliseconds: int + """ + + body: "_models.LivenessResponseBody" = rest_field() + """The response body. The schema of this field will depend on the request.url and request.method + used by the client. Required.""" + status_code: int = rest_field(name="statusCode") + """The HTTP status code returned to the client. Required.""" + latency_in_milliseconds: int = rest_field(name="latencyInMilliseconds") + """The server measured latency for this request in milliseconds. Required.""" + + @overload + def __init__( + self, + *, + body: "_models.LivenessResponseBody", + status_code: int, + latency_in_milliseconds: int, + ): ... 
    # NOTE(review): this file is machine-generated (TypeSpec/autorest Python emitter).
    # Each model maps snake_case Python attributes to camelCase wire names via
    # rest_field(name=...); do not hand-edit field declarations — regeneration will
    # revert them and the serialization layer in _model_base depends on them.
    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class AuditRequestInfo(_model_base.Model):
    """Audit entry for a request in the session.

    All required parameters must be populated in order to send to server.

    :ivar url: The relative URL and query of the liveness request. Required.
    :vartype url: str
    :ivar method: The HTTP method of the request (i.e., GET, POST, DELETE). Required.
    :vartype method: str
    :ivar content_length: The length of the request body in bytes.
    :vartype content_length: int
    :ivar content_type: The content type of the request. Required.
    :vartype content_type: str
    :ivar user_agent: The user agent used to submit the request.
    :vartype user_agent: str
    """

    url: str = rest_field()
    """The relative URL and query of the liveness request. Required."""
    method: str = rest_field()
    """The HTTP method of the request (i.e., GET, POST, DELETE). Required."""
    content_length: Optional[int] = rest_field(name="contentLength")
    """The length of the request body in bytes."""
    content_type: str = rest_field(name="contentType")
    """The content type of the request. Required."""
    user_agent: Optional[str] = rest_field(name="userAgent")
    """The user agent used to submit the request."""

    @overload
    def __init__(
        self,
        *,
        url: str,
        method: str,
        content_type: str,
        content_length: Optional[int] = None,
        user_agent: Optional[str] = None,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class BlurProperties(_model_base.Model):
    """Properties describing any presence of blur within the image.

    All required parameters must be populated in order to send to server.

    :ivar blur_level: An enum value indicating level of blurriness. Required. Known values are:
     "low", "medium", and "high".
    :vartype blur_level: str or ~azure.ai.vision.face.models.BlurLevel
    :ivar value: A number indicating level of blurriness ranging from 0 to 1. Required.
    :vartype value: float
    """

    blur_level: Union[str, "_models.BlurLevel"] = rest_field(name="blurLevel")
    """An enum value indicating level of blurriness. Required. Known values are: \"low\", \"medium\",
     and \"high\"."""
    value: float = rest_field()
    """A number indicating level of blurriness ranging from 0 to 1. Required."""

    @overload
    def __init__(
        self,
        *,
        blur_level: Union[str, "_models.BlurLevel"],
        value: float,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class CreateLivenessSessionContent(_model_base.Model):
    """Request for creating liveness session.

    All required parameters must be populated in order to send to server.

    :ivar liveness_operation_mode: Type of liveness mode the client should follow. Required.
     "Passive"
    :vartype liveness_operation_mode: str or ~azure.ai.vision.face.models.LivenessOperationMode
    :ivar send_results_to_client: Whether or not to allow a '200 - Success' response body to be
     sent to the client, which may be undesirable for security reasons. Default is false, clients
     will receive a '204 - NoContent' empty body response. Regardless of selection, calling Session
     GetResult will always contain a response body enabling business logic to be implemented.
    :vartype send_results_to_client: bool
    :ivar device_correlation_id_set_in_client: Whether or not to allow client to set their own
     'deviceCorrelationId' via the Vision SDK. Default is false, and 'deviceCorrelationId' must be
     set in this request body.
    :vartype device_correlation_id_set_in_client: bool
    :ivar device_correlation_id: Unique Guid per each end-user device. This is to provide rate
     limiting and anti-hammering. If 'deviceCorrelationIdSetInClient' is true in this request, this
     'deviceCorrelationId' must be null.
    :vartype device_correlation_id: str
    :ivar auth_token_time_to_live_in_seconds: Seconds the session should last for. Range is 60 to
     86400 seconds. Default value is 600.
    :vartype auth_token_time_to_live_in_seconds: int
    """

    liveness_operation_mode: Union[str, "_models.LivenessOperationMode"] = rest_field(name="livenessOperationMode")
    """Type of liveness mode the client should follow. Required. \"Passive\""""
    send_results_to_client: Optional[bool] = rest_field(name="sendResultsToClient")
    """Whether or not to allow a '200 - Success' response body to be sent to the client, which may be
     undesirable for security reasons. Default is false, clients will receive a '204 - NoContent'
     empty body response. Regardless of selection, calling Session GetResult will always contain a
     response body enabling business logic to be implemented."""
    device_correlation_id_set_in_client: Optional[bool] = rest_field(name="deviceCorrelationIdSetInClient")
    """Whether or not to allow client to set their own 'deviceCorrelationId' via the Vision SDK.
     Default is false, and 'deviceCorrelationId' must be set in this request body."""
    device_correlation_id: Optional[str] = rest_field(name="deviceCorrelationId")
    """Unique Guid per each end-user device. This is to provide rate limiting and anti-hammering. If
     'deviceCorrelationIdSetInClient' is true in this request, this 'deviceCorrelationId' must be
     null."""
    auth_token_time_to_live_in_seconds: Optional[int] = rest_field(name="authTokenTimeToLiveInSeconds")
    """Seconds the session should last for. Range is 60 to 86400 seconds. Default value is 600."""

    @overload
    def __init__(
        self,
        *,
        liveness_operation_mode: Union[str, "_models.LivenessOperationMode"],
        send_results_to_client: Optional[bool] = None,
        device_correlation_id_set_in_client: Optional[bool] = None,
        device_correlation_id: Optional[str] = None,
        auth_token_time_to_live_in_seconds: Optional[int] = None,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


# NOTE(review): unlike CreateLivenessSessionContent above, this multipart variant is
# emitted without keyword __init__ overloads — presumably because the generator treats
# multipart part models differently; confirm against the emitter before "fixing".
class CreateLivenessSessionContentForMultipart(_model_base.Model):
    """Dedicated parameter model for multipart/form-data.

    All required parameters must be populated in order to send to server.

    :ivar liveness_operation_mode: Type of liveness mode the client should follow. Required.
     "Passive"
    :vartype liveness_operation_mode: str or ~azure.ai.vision.face.models.LivenessOperationMode
    :ivar send_results_to_client: Whether or not to allow a '200 - Success' response body to be
     sent to the client, which may be undesirable for security reasons. Default is false, clients
     will receive a '204 - NoContent' empty body response. Regardless of selection, calling Session
     GetResult will always contain a response body enabling business logic to be implemented.
    :vartype send_results_to_client: bool
    :ivar device_correlation_id_set_in_client: Whether or not to allow client to set their own
     'deviceCorrelationId' via the Vision SDK. Default is false, and 'deviceCorrelationId' must be
     set in this request body.
    :vartype device_correlation_id_set_in_client: bool
    :ivar device_correlation_id: Unique Guid per each end-user device. This is to provide rate
     limiting and anti-hammering. If 'deviceCorrelationIdSetInClient' is true in this request, this
     'deviceCorrelationId' must be null.
    :vartype device_correlation_id: str
    :ivar auth_token_time_to_live_in_seconds: Seconds the session should last for. Range is 60 to
     86400 seconds. Default value is 600.
    :vartype auth_token_time_to_live_in_seconds: int
    """

    liveness_operation_mode: Union[str, "_models.LivenessOperationMode"] = rest_field(name="livenessOperationMode")
    """Type of liveness mode the client should follow. Required. \"Passive\""""
    send_results_to_client: Optional[bool] = rest_field(name="sendResultsToClient")
    """Whether or not to allow a '200 - Success' response body to be sent to the client, which may be
     undesirable for security reasons. Default is false, clients will receive a '204 - NoContent'
     empty body response. Regardless of selection, calling Session GetResult will always contain a
     response body enabling business logic to be implemented."""
    device_correlation_id_set_in_client: Optional[bool] = rest_field(name="deviceCorrelationIdSetInClient")
    """Whether or not to allow client to set their own 'deviceCorrelationId' via the Vision SDK.
     Default is false, and 'deviceCorrelationId' must be set in this request body."""
    device_correlation_id: Optional[str] = rest_field(name="deviceCorrelationId")
    """Unique Guid per each end-user device. This is to provide rate limiting and anti-hammering. If
     'deviceCorrelationIdSetInClient' is true in this request, this 'deviceCorrelationId' must be
     null."""
    auth_token_time_to_live_in_seconds: Optional[int] = rest_field(name="authTokenTimeToLiveInSeconds")
    """Seconds the session should last for. Range is 60 to 86400 seconds. Default value is 600."""


class CreateLivenessSessionResult(_model_base.Model):
    """Response of liveness session creation.

    All required parameters must be populated in order to send to server.

    :ivar session_id: The unique session ID of the created session. It will expire 48 hours after
     it was created or may be deleted sooner using the corresponding Session DELETE operation.
     Required.
    :vartype session_id: str
    :ivar auth_token: Bearer token to provide authentication for the Vision SDK running on a client
     application. This Bearer token has limited permissions to perform only the required action and
     expires after the TTL time. It is also auditable. Required.
    :vartype auth_token: str
    """

    session_id: str = rest_field(name="sessionId")
    """The unique session ID of the created session. It will expire 48 hours after it was created or
     may be deleted sooner using the corresponding Session DELETE operation. Required."""
    auth_token: str = rest_field(name="authToken")
    """Bearer token to provide authentication for the Vision SDK running on a client application. This
     Bearer token has limited permissions to perform only the required action and expires after the
     TTL time. It is also auditable. Required."""

    @overload
    def __init__(
        self,
        *,
        session_id: str,
        auth_token: str,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class CreateLivenessWithVerifySessionContent(_model_base.Model):
    """Request of liveness with verify session creation.

    All required parameters must be populated in order to send to server.

    :ivar parameters: The parameters for creating session. Required.
    :vartype parameters:
     ~azure.ai.vision.face.models._models.CreateLivenessSessionContentForMultipart
    :ivar verify_image: The image stream for verify. Content-Disposition header field for this part
     must have filename. Required.
    :vartype verify_image: bytes
    """

    # NOTE(review): the doubled "_models._models." qualification is generator output
    # (the multipart model is not re-exported from the public models namespace);
    # confirm against the emitter before simplifying.
    parameters: "_models._models.CreateLivenessSessionContentForMultipart" = rest_field(name="Parameters")
    """The parameters for creating session. Required."""
    # is_multipart_file_input marks this field as the file part of the multipart body.
    verify_image: FileType = rest_field(name="VerifyImage", is_multipart_file_input=True)
    """The image stream for verify. Content-Disposition header field for this part must have filename.
     Required."""


class CreateLivenessWithVerifySessionResult(_model_base.Model):
    """Response of liveness session with verify creation with verify image provided.

    All required parameters must be populated in order to send to server.

    :ivar session_id: The unique session ID of the created session. It will expire 48 hours after
     it was created or may be deleted sooner using the corresponding Session DELETE operation.
     Required.
    :vartype session_id: str
    :ivar auth_token: Bearer token to provide authentication for the Vision SDK running on a client
     application. This Bearer token has limited permissions to perform only the required action and
     expires after the TTL time. It is also auditable. Required.
    :vartype auth_token: str
    :ivar verify_image: The detail of face for verification.
    :vartype verify_image: ~azure.ai.vision.face.models.LivenessWithVerifyImage
    """

    session_id: str = rest_field(name="sessionId")
    """The unique session ID of the created session. It will expire 48 hours after it was created or
     may be deleted sooner using the corresponding Session DELETE operation. Required."""
    auth_token: str = rest_field(name="authToken")
    """Bearer token to provide authentication for the Vision SDK running on a client application. This
     Bearer token has limited permissions to perform only the required action and expires after the
     TTL time. It is also auditable. Required."""
    verify_image: Optional["_models.LivenessWithVerifyImage"] = rest_field(name="verifyImage")
    """The detail of face for verification."""

    @overload
    def __init__(
        self,
        *,
        session_id: str,
        auth_token: str,
        verify_image: Optional["_models.LivenessWithVerifyImage"] = None,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class CreatePersonResult(_model_base.Model):
    """Response of create person.

    All required parameters must be populated in order to send to server.

    :ivar person_id: Person ID of the person. Required.
    :vartype person_id: str
    """

    person_id: str = rest_field(name="personId")
    """Person ID of the person. Required."""

    @overload
    def __init__(
        self,
        *,
        person_id: str,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class DynamicPersonGroup(_model_base.Model):
    """A container that references Person Directory "Create Person".

    Readonly variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to server.

    :ivar dynamic_person_group_id: ID of the dynamic person group. Required.
    :vartype dynamic_person_group_id: str
    :ivar name: User defined name, maximum length is 128. Required.
    :vartype name: str
    :ivar user_data: Optional user defined data. Length should not exceed 16K.
    :vartype user_data: str
    """

    # visibility=["read"]: server-populated; excluded from request payloads, hence
    # also absent from the keyword __init__ overload below.
    dynamic_person_group_id: str = rest_field(name="dynamicPersonGroupId", visibility=["read"])
    """ID of the dynamic person group. Required."""
    name: str = rest_field()
    """User defined name, maximum length is 128. Required."""
    user_data: Optional[str] = rest_field(name="userData")
    """Optional user defined data. Length should not exceed 16K."""

    @overload
    def __init__(
        self,
        *,
        name: str,
        user_data: Optional[str] = None,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class ExposureProperties(_model_base.Model):
    """Properties describing exposure level of the image.

    All required parameters must be populated in order to send to server.

    :ivar exposure_level: An enum value indicating level of exposure. Required. Known values are:
     "underExposure", "goodExposure", and "overExposure".
    :vartype exposure_level: str or ~azure.ai.vision.face.models.ExposureLevel
    :ivar value: A number indicating level of exposure level ranging from 0 to 1. [0, 0.25) is
     under exposure. [0.25, 0.75) is good exposure. [0.75, 1] is over exposure. Required.
    :vartype value: float
    """

    exposure_level: Union[str, "_models.ExposureLevel"] = rest_field(name="exposureLevel")
    """An enum value indicating level of exposure. Required. Known values are: \"underExposure\",
     \"goodExposure\", and \"overExposure\"."""
    value: float = rest_field()
    """A number indicating level of exposure level ranging from 0 to 1. [0, 0.25) is under exposure.
     [0.25, 0.75) is good exposure. [0.75, 1] is over exposure. Required."""

    @overload
    def __init__(
        self,
        *,
        exposure_level: Union[str, "_models.ExposureLevel"],
        value: float,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class FaceAttributes(_model_base.Model):  # pylint: disable=too-many-instance-attributes
    """Face attributes for the detected face.

    :ivar age: Age in years.
    :vartype age: float
    :ivar smile: Smile intensity, a number between [0,1].
    :vartype smile: float
    :ivar facial_hair: Properties describing facial hair attributes.
    :vartype facial_hair: ~azure.ai.vision.face.models.FacialHair
    :ivar glasses: Glasses type if any of the face. Known values are: "noGlasses",
     "readingGlasses", "sunglasses", and "swimmingGoggles".
    :vartype glasses: str or ~azure.ai.vision.face.models.GlassesType
    :ivar head_pose: 3-D roll/yaw/pitch angles for face direction.
    :vartype head_pose: ~azure.ai.vision.face.models.HeadPose
    :ivar hair: Properties describing hair attributes.
    :vartype hair: ~azure.ai.vision.face.models.HairProperties
    :ivar occlusion: Properties describing occlusions on a given face.
    :vartype occlusion: ~azure.ai.vision.face.models.OcclusionProperties
    :ivar accessories: Properties describing any accessories on a given face.
    :vartype accessories: list[~azure.ai.vision.face.models.AccessoryItem]
    :ivar blur: Properties describing any presence of blur within the image.
    :vartype blur: ~azure.ai.vision.face.models.BlurProperties
    :ivar exposure: Properties describing exposure level of the image.
    :vartype exposure: ~azure.ai.vision.face.models.ExposureProperties
    :ivar noise: Properties describing noise level of the image.
    :vartype noise: ~azure.ai.vision.face.models.NoiseProperties
    :ivar mask: Properties describing the presence of a mask on a given face.
    :vartype mask: ~azure.ai.vision.face.models.MaskProperties
    :ivar quality_for_recognition: Properties describing the overall image quality regarding
     whether the image being used in the detection is of sufficient quality to attempt face
     recognition on. Known values are: "low", "medium", and "high".
    :vartype quality_for_recognition: str or ~azure.ai.vision.face.models.QualityForRecognition
    """

    # All attributes are optional: each is only populated when the corresponding
    # attribute type was requested on the detect call (see the :ivar: docs above).
    age: Optional[float] = rest_field()
    """Age in years."""
    smile: Optional[float] = rest_field()
    """Smile intensity, a number between [0,1]."""
    facial_hair: Optional["_models.FacialHair"] = rest_field(name="facialHair")
    """Properties describing facial hair attributes."""
    glasses: Optional[Union[str, "_models.GlassesType"]] = rest_field()
    """Glasses type if any of the face. Known values are: \"noGlasses\", \"readingGlasses\",
     \"sunglasses\", and \"swimmingGoggles\"."""
    head_pose: Optional["_models.HeadPose"] = rest_field(name="headPose")
    """3-D roll/yaw/pitch angles for face direction."""
    hair: Optional["_models.HairProperties"] = rest_field()
    """Properties describing hair attributes."""
    occlusion: Optional["_models.OcclusionProperties"] = rest_field()
    """Properties describing occlusions on a given face."""
    accessories: Optional[List["_models.AccessoryItem"]] = rest_field()
    """Properties describing any accessories on a given face."""
    blur: Optional["_models.BlurProperties"] = rest_field()
    """Properties describing any presence of blur within the image."""
    exposure: Optional["_models.ExposureProperties"] = rest_field()
    """Properties describing exposure level of the image."""
    noise: Optional["_models.NoiseProperties"] = rest_field()
    """Properties describing noise level of the image."""
    mask: Optional["_models.MaskProperties"] = rest_field()
    """Properties describing the presence of a mask on a given face."""
    quality_for_recognition: Optional[Union[str, "_models.QualityForRecognition"]] = rest_field(
        name="qualityForRecognition"
    )
    """Properties describing the overall image quality regarding whether the image being used in the
     detection is of sufficient quality to attempt face recognition on. Known values are: \"low\",
     \"medium\", and \"high\"."""

    @overload
    def __init__(
        self,
        *,
        age: Optional[float] = None,
        smile: Optional[float] = None,
        facial_hair: Optional["_models.FacialHair"] = None,
        glasses: Optional[Union[str, "_models.GlassesType"]] = None,
        head_pose: Optional["_models.HeadPose"] = None,
        hair: Optional["_models.HairProperties"] = None,
        occlusion: Optional["_models.OcclusionProperties"] = None,
        accessories: Optional[List["_models.AccessoryItem"]] = None,
        blur: Optional["_models.BlurProperties"] = None,
        exposure: Optional["_models.ExposureProperties"] = None,
        noise: Optional["_models.NoiseProperties"] = None,
        mask: Optional["_models.MaskProperties"] = None,
        quality_for_recognition: Optional[Union[str, "_models.QualityForRecognition"]] = None,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class FaceCollectionTrainingResult(_model_base.Model):
    """Training result of a container.

    All required parameters must be populated in order to send to server.

    :ivar status: Training status of the container. Required. Known values are: "notStarted",
     "running", "succeeded", and "failed".
    :vartype status: str or ~azure.ai.vision.face.models.FaceOperationStatus
    :ivar created_date_time: A combined UTC date and time string that describes the created time of
     the person group, large person group or large face list. Required.
    :vartype created_date_time: ~datetime.datetime
    :ivar last_action_date_time: A combined UTC date and time string that describes the last modify
     time of the person group, large person group or large face list, could be null value when the
     group is not successfully trained. Required.
    :vartype last_action_date_time: ~datetime.datetime
    :ivar last_successful_training_date_time: A combined UTC date and time string that describes
     the last successful training time of the person group, large person group or large face list.
     Required.
    :vartype last_successful_training_date_time: ~datetime.datetime
    :ivar message: Show failure message when training failed (omitted when training succeed).
    :vartype message: str
    """

    status: Union[str, "_models.FaceOperationStatus"] = rest_field()
    """Training status of the container. Required. Known values are: \"notStarted\", \"running\",
     \"succeeded\", and \"failed\"."""
    # format="rfc3339": the wire value is an RFC 3339 timestamp string, deserialized
    # into datetime.datetime by the _model_base layer.
    created_date_time: datetime.datetime = rest_field(name="createdDateTime", format="rfc3339")
    """A combined UTC date and time string that describes the created time of the person group, large
     person group or large face list. Required."""
    last_action_date_time: datetime.datetime = rest_field(name="lastActionDateTime", format="rfc3339")
    """A combined UTC date and time string that describes the last modify time of the person group,
     large person group or large face list, could be null value when the group is not successfully
     trained. Required."""
    last_successful_training_date_time: datetime.datetime = rest_field(
        name="lastSuccessfulTrainingDateTime", format="rfc3339"
    )
    """A combined UTC date and time string that describes the last successful training time of the
     person group, large person group or large face list. Required."""
    message: Optional[str] = rest_field()
    """Show failure message when training failed (omitted when training succeed)."""

    @overload
    def __init__(
        self,
        *,
        status: Union[str, "_models.FaceOperationStatus"],
        created_date_time: datetime.datetime,
        last_action_date_time: datetime.datetime,
        last_successful_training_date_time: datetime.datetime,
        message: Optional[str] = None,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class FaceDetectionResult(_model_base.Model):
    """Response for detect API.

    All required parameters must be populated in order to send to server.

    :ivar face_id: Unique faceId of the detected face, created by detection API and it will expire
     24 hours after the detection call. To return this, it requires 'returnFaceId' parameter to be
     true.
    :vartype face_id: str
    :ivar recognition_model: The 'recognitionModel' associated with this faceId. This is only
     returned when 'returnRecognitionModel' is explicitly set as true. Known values are:
     "recognition_01", "recognition_02", "recognition_03", and "recognition_04".
    :vartype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel
    :ivar face_rectangle: A rectangle area for the face location on image. Required.
    :vartype face_rectangle: ~azure.ai.vision.face.models.FaceRectangle
    :ivar face_landmarks: An array of 27-point face landmarks pointing to the important positions
     of face components. To return this, it requires 'returnFaceLandmarks' parameter to be true.
    :vartype face_landmarks: ~azure.ai.vision.face.models.FaceLandmarks
    :ivar face_attributes: Face attributes for detected face.
    :vartype face_attributes: ~azure.ai.vision.face.models.FaceAttributes
    """

    face_id: Optional[str] = rest_field(name="faceId")
    """Unique faceId of the detected face, created by detection API and it will expire 24 hours after
     the detection call. To return this, it requires 'returnFaceId' parameter to be true."""
    recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = rest_field(name="recognitionModel")
    """The 'recognitionModel' associated with this faceId. This is only returned when
     'returnRecognitionModel' is explicitly set as true. Known values are: \"recognition_01\",
     \"recognition_02\", \"recognition_03\", and \"recognition_04\"."""
    face_rectangle: "_models.FaceRectangle" = rest_field(name="faceRectangle")
    """A rectangle area for the face location on image. Required."""
    face_landmarks: Optional["_models.FaceLandmarks"] = rest_field(name="faceLandmarks")
    """An array of 27-point face landmarks pointing to the important positions of face components. To
     return this, it requires 'returnFaceLandmarks' parameter to be true."""
    face_attributes: Optional["_models.FaceAttributes"] = rest_field(name="faceAttributes")
    """Face attributes for detected face."""

    @overload
    def __init__(
        self,
        *,
        face_rectangle: "_models.FaceRectangle",
        face_id: Optional[str] = None,
        recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = None,
        face_landmarks: Optional["_models.FaceLandmarks"] = None,
        face_attributes: Optional["_models.FaceAttributes"] = None,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class FaceError(_model_base.Model):
    """The error object. For comprehensive details on error codes and messages returned by the Face
    Service, please refer to the following link: https://aka.ms/face-error-codes-and-messages.

    All required parameters must be populated in order to send to server.

    :ivar code: One of a server-defined set of error codes. Required.
    :vartype code: str
    :ivar message: A human-readable representation of the error. Required.
    :vartype message: str
    """

    code: str = rest_field()
    """One of a server-defined set of error codes. Required."""
    message: str = rest_field()
    """A human-readable representation of the error. Required."""

    @overload
    def __init__(
        self,
        *,
        code: str,
        message: str,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class FaceErrorResponse(_model_base.Model):
    """A response containing error details.

    All required parameters must be populated in order to send to server.

    :ivar error: The error object. Required.
    :vartype error: ~azure.ai.vision.face.models.FaceError
    """

    error: "_models.FaceError" = rest_field()
    """The error object. Required."""

    @overload
    def __init__(
        self,
        *,
        error: "_models.FaceError",
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class FaceFindSimilarResult(_model_base.Model):
    """Response body for find similar face operation.

    All required parameters must be populated in order to send to server.

    :ivar confidence: Confidence value of the candidate. The higher confidence, the more similar.
     Range between [0,1]. Required.
    :vartype confidence: float
    :ivar face_id: faceId of candidate face when find by faceIds. faceId is created by "Detect" and
     will expire 24 hours after the detection call.
    :vartype face_id: str
    :ivar persisted_face_id: persistedFaceId of candidate face when find by faceListId or
     largeFaceListId. persistedFaceId in face list/large face list is persisted and will not expire.
    :vartype persisted_face_id: str
    """

    confidence: float = rest_field()
    """Confidence value of the candidate. The higher confidence, the more similar. Range between
     [0,1]. Required."""
    # Exactly one of face_id / persisted_face_id is populated, depending on whether
    # the query searched transient faceIds or a persisted face list (see :ivar: docs).
    face_id: Optional[str] = rest_field(name="faceId")
    """faceId of candidate face when find by faceIds. faceId is created by \"Detect\" and will expire
     24 hours after the detection call."""
    persisted_face_id: Optional[str] = rest_field(name="persistedFaceId")
    """persistedFaceId of candidate face when find by faceListId or largeFaceListId. persistedFaceId
     in face list/large face list is persisted and will not expire."""

    @overload
    def __init__(
        self,
        *,
        confidence: float,
        face_id: Optional[str] = None,
        persisted_face_id: Optional[str] = None,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class FaceGroupingResult(_model_base.Model):
    """Response body for group face operation.

    All required parameters must be populated in order to send to server.

    :ivar groups: A partition of the original faces based on face similarity. Groups are ranked by
     number of faces. Required.
    :vartype groups: list[list[str]]
    :ivar messy_group: Face ids array of faces that cannot find any similar faces from original
     faces. Required.
    :vartype messy_group: list[str]
    """

    groups: List[List[str]] = rest_field()
    """A partition of the original faces based on face similarity. Groups are ranked by number of
     faces. Required."""
    messy_group: List[str] = rest_field(name="messyGroup")
    """Face ids array of faces that cannot find any similar faces from original faces. Required."""

    @overload
    def __init__(
        self,
        *,
        groups: List[List[str]],
        messy_group: List[str],
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class FaceIdentificationCandidate(_model_base.Model):
    """Candidate for identify call.

    All required parameters must be populated in order to send to server.

    :ivar person_id: personId of candidate person. Required.
    :vartype person_id: str
    :ivar confidence: Confidence value of the candidate. The higher confidence, the more similar.
     Range between [0,1]. Required.
    :vartype confidence: float
    """

    person_id: str = rest_field(name="personId")
    """personId of candidate person. Required."""
    confidence: float = rest_field()
    """Confidence value of the candidate. The higher confidence, the more similar. Range between
     [0,1]. Required."""

    @overload
    def __init__(
        self,
        *,
        person_id: str,
        confidence: float,
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class FaceIdentificationResult(_model_base.Model):
    """Identify result.

    All required parameters must be populated in order to send to server.

    :ivar face_id: faceId of the query face. Required.
    :vartype face_id: str
    :ivar candidates: Identified person candidates for that face (ranked by confidence). Array size
     should be no larger than input maxNumOfCandidatesReturned. If no person is identified, will
     return an empty array. Required.
    :vartype candidates: list[~azure.ai.vision.face.models.FaceIdentificationCandidate]
    """

    face_id: str = rest_field(name="faceId")
    """faceId of the query face. Required."""
    candidates: List["_models.FaceIdentificationCandidate"] = rest_field()
    """Identified person candidates for that face (ranked by confidence). Array size should be no
     larger than input maxNumOfCandidatesReturned. If no person is identified, will return an empty
     array. Required."""

    @overload
    def __init__(
        self,
        *,
        face_id: str,
        candidates: List["_models.FaceIdentificationCandidate"],
    ): ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]):
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
        super().__init__(*args, **kwargs)


class FaceLandmarks(_model_base.Model):  # pylint: disable=too-many-instance-attributes
    """A collection of 27-point face landmarks pointing to the important positions of face components.

    All required parameters must be populated in order to send to server.

    :ivar pupil_left: The coordinates of the left eye pupil. Required.
    :vartype pupil_left: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar pupil_right: The coordinates of the right eye pupil. Required.
    :vartype pupil_right: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar nose_tip: The coordinates of the nose tip. Required.
    :vartype nose_tip: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar mouth_left: The coordinates of the mouth left. Required.
    :vartype mouth_left: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar mouth_right: The coordinates of the mouth right. Required.
    :vartype mouth_right: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eyebrow_left_outer: The coordinates of the left eyebrow outer. Required.
    :vartype eyebrow_left_outer: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eyebrow_left_inner: The coordinates of the left eyebrow inner. Required.
    :vartype eyebrow_left_inner: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eye_left_outer: The coordinates of the left eye outer. Required.
    :vartype eye_left_outer: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eye_left_top: The coordinates of the left eye top. Required.
    :vartype eye_left_top: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eye_left_bottom: The coordinates of the left eye bottom. Required.
    :vartype eye_left_bottom: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eye_left_inner: The coordinates of the left eye inner. Required.
    :vartype eye_left_inner: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eyebrow_right_inner: The coordinates of the right eyebrow inner. Required.
    :vartype eyebrow_right_inner: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eyebrow_right_outer: The coordinates of the right eyebrow outer. Required.
    :vartype eyebrow_right_outer: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eye_right_inner: The coordinates of the right eye inner. Required.
    :vartype eye_right_inner: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eye_right_top: The coordinates of the right eye top. Required.
    :vartype eye_right_top: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eye_right_bottom: The coordinates of the right eye bottom. Required.
    :vartype eye_right_bottom: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar eye_right_outer: The coordinates of the right eye outer. Required.
    :vartype eye_right_outer: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar nose_root_left: The coordinates of the nose root left. Required.
    :vartype nose_root_left: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar nose_root_right: The coordinates of the nose root right. Required.
    :vartype nose_root_right: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar nose_left_alar_top: The coordinates of the nose left alar top. Required.
    :vartype nose_left_alar_top: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar nose_right_alar_top: The coordinates of the nose right alar top. Required.
    :vartype nose_right_alar_top: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar nose_left_alar_out_tip: The coordinates of the nose left alar out tip. Required.
    :vartype nose_left_alar_out_tip: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar nose_right_alar_out_tip: The coordinates of the nose right alar out tip. Required.
    :vartype nose_right_alar_out_tip: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar upper_lip_top: The coordinates of the upper lip top. Required.
    :vartype upper_lip_top: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar upper_lip_bottom: The coordinates of the upper lip bottom. Required.
    :vartype upper_lip_bottom: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar under_lip_top: The coordinates of the under lip top. Required.
    :vartype under_lip_top: ~azure.ai.vision.face.models.LandmarkCoordinate
    :ivar under_lip_bottom: The coordinates of the under lip bottom. Required.
    :vartype under_lip_bottom: ~azure.ai.vision.face.models.LandmarkCoordinate
    """

    pupil_left: "_models.LandmarkCoordinate" = rest_field(name="pupilLeft")
    """The coordinates of the left eye pupil. Required."""
    pupil_right: "_models.LandmarkCoordinate" = rest_field(name="pupilRight")
    """The coordinates of the right eye pupil. Required."""
    nose_tip: "_models.LandmarkCoordinate" = rest_field(name="noseTip")
    """The coordinates of the nose tip. Required."""
    mouth_left: "_models.LandmarkCoordinate" = rest_field(name="mouthLeft")
    """The coordinates of the mouth left. Required."""
    mouth_right: "_models.LandmarkCoordinate" = rest_field(name="mouthRight")
    """The coordinates of the mouth right. Required."""
    eyebrow_left_outer: "_models.LandmarkCoordinate" = rest_field(name="eyebrowLeftOuter")
    """The coordinates of the left eyebrow outer.
Required.""" + eyebrow_left_inner: "_models.LandmarkCoordinate" = rest_field(name="eyebrowLeftInner") + """The coordinates of the left eyebrow inner. Required.""" + eye_left_outer: "_models.LandmarkCoordinate" = rest_field(name="eyeLeftOuter") + """The coordinates of the left eye outer. Required.""" + eye_left_top: "_models.LandmarkCoordinate" = rest_field(name="eyeLeftTop") + """The coordinates of the left eye top. Required.""" + eye_left_bottom: "_models.LandmarkCoordinate" = rest_field(name="eyeLeftBottom") + """The coordinates of the left eye bottom. Required.""" + eye_left_inner: "_models.LandmarkCoordinate" = rest_field(name="eyeLeftInner") + """The coordinates of the left eye inner. Required.""" + eyebrow_right_inner: "_models.LandmarkCoordinate" = rest_field(name="eyebrowRightInner") + """The coordinates of the right eyebrow inner. Required.""" + eyebrow_right_outer: "_models.LandmarkCoordinate" = rest_field(name="eyebrowRightOuter") + """The coordinates of the right eyebrow outer. Required.""" + eye_right_inner: "_models.LandmarkCoordinate" = rest_field(name="eyeRightInner") + """The coordinates of the right eye inner. Required.""" + eye_right_top: "_models.LandmarkCoordinate" = rest_field(name="eyeRightTop") + """The coordinates of the right eye top. Required.""" + eye_right_bottom: "_models.LandmarkCoordinate" = rest_field(name="eyeRightBottom") + """The coordinates of the right eye bottom. Required.""" + eye_right_outer: "_models.LandmarkCoordinate" = rest_field(name="eyeRightOuter") + """The coordinates of the right eye outer. Required.""" + nose_root_left: "_models.LandmarkCoordinate" = rest_field(name="noseRootLeft") + """The coordinates of the nose root left. Required.""" + nose_root_right: "_models.LandmarkCoordinate" = rest_field(name="noseRootRight") + """The coordinates of the nose root right. Required.""" + nose_left_alar_top: "_models.LandmarkCoordinate" = rest_field(name="noseLeftAlarTop") + """The coordinates of the nose left alar top. 
Required.""" + nose_right_alar_top: "_models.LandmarkCoordinate" = rest_field(name="noseRightAlarTop") + """The coordinates of the nose right alar top. Required.""" + nose_left_alar_out_tip: "_models.LandmarkCoordinate" = rest_field(name="noseLeftAlarOutTip") + """The coordinates of the nose left alar out tip. Required.""" + nose_right_alar_out_tip: "_models.LandmarkCoordinate" = rest_field(name="noseRightAlarOutTip") + """The coordinates of the nose right alar out tip. Required.""" + upper_lip_top: "_models.LandmarkCoordinate" = rest_field(name="upperLipTop") + """The coordinates of the upper lip top. Required.""" + upper_lip_bottom: "_models.LandmarkCoordinate" = rest_field(name="upperLipBottom") + """The coordinates of the upper lip bottom. Required.""" + under_lip_top: "_models.LandmarkCoordinate" = rest_field(name="underLipTop") + """The coordinates of the under lip top. Required.""" + under_lip_bottom: "_models.LandmarkCoordinate" = rest_field(name="underLipBottom") + """The coordinates of the under lip bottom. 
Required.""" + + @overload + def __init__( + self, + *, + pupil_left: "_models.LandmarkCoordinate", + pupil_right: "_models.LandmarkCoordinate", + nose_tip: "_models.LandmarkCoordinate", + mouth_left: "_models.LandmarkCoordinate", + mouth_right: "_models.LandmarkCoordinate", + eyebrow_left_outer: "_models.LandmarkCoordinate", + eyebrow_left_inner: "_models.LandmarkCoordinate", + eye_left_outer: "_models.LandmarkCoordinate", + eye_left_top: "_models.LandmarkCoordinate", + eye_left_bottom: "_models.LandmarkCoordinate", + eye_left_inner: "_models.LandmarkCoordinate", + eyebrow_right_inner: "_models.LandmarkCoordinate", + eyebrow_right_outer: "_models.LandmarkCoordinate", + eye_right_inner: "_models.LandmarkCoordinate", + eye_right_top: "_models.LandmarkCoordinate", + eye_right_bottom: "_models.LandmarkCoordinate", + eye_right_outer: "_models.LandmarkCoordinate", + nose_root_left: "_models.LandmarkCoordinate", + nose_root_right: "_models.LandmarkCoordinate", + nose_left_alar_top: "_models.LandmarkCoordinate", + nose_right_alar_top: "_models.LandmarkCoordinate", + nose_left_alar_out_tip: "_models.LandmarkCoordinate", + nose_right_alar_out_tip: "_models.LandmarkCoordinate", + upper_lip_top: "_models.LandmarkCoordinate", + upper_lip_bottom: "_models.LandmarkCoordinate", + under_lip_top: "_models.LandmarkCoordinate", + under_lip_bottom: "_models.LandmarkCoordinate", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class FaceList(_model_base.Model): + """Face list is a list of faces, up to 1,000 faces. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. 
+ + :ivar name: User defined name, maximum length is 128. Required. + :vartype name: str + :ivar user_data: Optional user defined data. Length should not exceed 16K. + :vartype user_data: str + :ivar recognition_model: Name of recognition model. Recognition model is used when the face + features are extracted and associated with detected faceIds. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". + :vartype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :ivar face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :vartype face_list_id: str + :ivar persisted_faces: Face ids of registered faces in the face list. + :vartype persisted_faces: list[~azure.ai.vision.face.models.FaceListFace] + """ + + name: str = rest_field() + """User defined name, maximum length is 128. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """Optional user defined data. Length should not exceed 16K.""" + recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = rest_field(name="recognitionModel") + """Name of recognition model. Recognition model is used when the face features are extracted and + associated with detected faceIds. Known values are: \"recognition_01\", \"recognition_02\", + \"recognition_03\", and \"recognition_04\".""" + face_list_id: str = rest_field(name="faceListId", visibility=["read"]) + """Valid character is letter in lower case or digit or '-' or '_', maximum length is 64. Required.""" + persisted_faces: Optional[List["_models.FaceListFace"]] = rest_field(name="persistedFaces") + """Face ids of registered faces in the face list.""" + + @overload + def __init__( + self, + *, + name: str, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = None, + persisted_faces: Optional[List["_models.FaceListFace"]] = None, + ): ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class FaceListFace(_model_base.Model): + """Face resource for face list. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar persisted_face_id: Face ID of the face. Required. + :vartype persisted_face_id: str + :ivar user_data: User-provided data attached to the face. The length limit is 1K. + :vartype user_data: str + """ + + persisted_face_id: str = rest_field(name="persistedFaceId", visibility=["read"]) + """Face ID of the face. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """User-provided data attached to the face. The length limit is 1K.""" + + @overload + def __init__( + self, + *, + user_data: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class FaceListItem(_model_base.Model): + """Face list item for list face list. + + All required parameters must be populated in order to send to server. + + :ivar name: User defined name, maximum length is 128. Required. + :vartype name: str + :ivar user_data: Optional user defined data. Length should not exceed 16K. + :vartype user_data: str + :ivar recognition_model: Name of recognition model. Recognition model is used when the face + features are extracted and associated with detected faceIds. 
Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". + :vartype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :ivar face_list_id: Valid character is letter in lower case or digit or '-' or '_', maximum + length is 64. Required. + :vartype face_list_id: str + """ + + name: str = rest_field() + """User defined name, maximum length is 128. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """Optional user defined data. Length should not exceed 16K.""" + recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = rest_field(name="recognitionModel") + """Name of recognition model. Recognition model is used when the face features are extracted and + associated with detected faceIds. Known values are: \"recognition_01\", \"recognition_02\", + \"recognition_03\", and \"recognition_04\".""" + face_list_id: str = rest_field(name="faceListId") + """Valid character is letter in lower case or digit or '-' or '_', maximum length is 64. Required.""" + + @overload + def __init__( + self, + *, + name: str, + face_list_id: str, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class FaceOperationResult(_model_base.Model): + """Long running operation resource for person directory. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar operation_id: Operation ID of the operation. Required. + :vartype operation_id: str + :ivar status: Current status of the operation. 
Required. Known values are: "notStarted", + "running", "succeeded", and "failed". + :vartype status: str or ~azure.ai.vision.face.models.FaceOperationStatus + :ivar created_time: Date and time the operation was created. Required. + :vartype created_time: ~datetime.datetime + :ivar last_action_time: Date and time the operation was last updated. + :vartype last_action_time: ~datetime.datetime + :ivar finished_time: Date and time the operation was finished. + :vartype finished_time: ~datetime.datetime + :ivar message: Message for the operation. + :vartype message: str + """ + + operation_id: str = rest_field(name="operationId", visibility=["read"]) + """Operation ID of the operation. Required.""" + status: Union[str, "_models.FaceOperationStatus"] = rest_field() + """Current status of the operation. Required. Known values are: \"notStarted\", \"running\", + \"succeeded\", and \"failed\".""" + created_time: datetime.datetime = rest_field(name="createdTime", format="rfc3339") + """Date and time the operation was created. Required.""" + last_action_time: Optional[datetime.datetime] = rest_field(name="lastActionTime", format="rfc3339") + """Date and time the operation was last updated.""" + finished_time: Optional[datetime.datetime] = rest_field(name="finishedTime", format="rfc3339") + """Date and time the operation was finished.""" + message: Optional[str] = rest_field() + """Message for the operation.""" + + @overload + def __init__( + self, + *, + status: Union[str, "_models.FaceOperationStatus"], + created_time: datetime.datetime, + last_action_time: Optional[datetime.datetime] = None, + finished_time: Optional[datetime.datetime] = None, + message: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class FaceRectangle(_model_base.Model):
+    """A rectangle within which a face can be found.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar top: The distance from the top edge of the image to the top edge of the rectangle, in
+     pixels. Required.
+    :vartype top: int
+    :ivar left: The distance from the left edge of the image to the left edge of the rectangle, in
+     pixels. Required.
+    :vartype left: int
+    :ivar width: The width of the rectangle, in pixels. Required.
+    :vartype width: int
+    :ivar height: The height of the rectangle, in pixels. Required.
+    :vartype height: int
+    """
+
+    top: int = rest_field()
+    """The distance from the top edge of the image to the top edge of the rectangle, in pixels.
+     Required."""
+    left: int = rest_field()
+    """The distance from the left edge of the image to the left edge of the rectangle, in pixels.
+     Required."""
+    width: int = rest_field()
+    """The width of the rectangle, in pixels. Required."""
+    height: int = rest_field()
+    """The height of the rectangle, in pixels. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        top: int,
+        left: int,
+        width: int,
+        height: int,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class FaceVerificationResult(_model_base.Model):
+    """Verify result.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar is_identical: True if the two faces belong to the same person or the face belongs to the
+     person, otherwise false. Required.
+ :vartype is_identical: bool + :ivar confidence: A number indicates the similarity confidence of whether two faces belong to + the same person, or whether the face belongs to the person. By default, isIdentical is set to + True if similarity confidence is greater than or equal to 0.5. This is useful for advanced + users to override 'isIdentical' and fine-tune the result on their own data. Required. + :vartype confidence: float + """ + + is_identical: bool = rest_field(name="isIdentical") + """True if the two faces belong to the same person or the face belongs to the person, otherwise + false. Required.""" + confidence: float = rest_field() + """A number indicates the similarity confidence of whether two faces belong to the same person, or + whether the face belongs to the person. By default, isIdentical is set to True if similarity + confidence is greater than or equal to 0.5. This is useful for advanced users to override + 'isIdentical' and fine-tune the result on their own data. Required.""" + + @overload + def __init__( + self, + *, + is_identical: bool, + confidence: float, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class FacialHair(_model_base.Model): + """Properties describing facial hair attributes. + + All required parameters must be populated in order to send to server. + + :ivar moustache: A number ranging from 0 to 1 indicating a level of confidence associated with + a property. Required. + :vartype moustache: float + :ivar beard: A number ranging from 0 to 1 indicating a level of confidence associated with a + property. Required. + :vartype beard: float + :ivar sideburns: A number ranging from 0 to 1 indicating a level of confidence associated with + a property. Required. 
+ :vartype sideburns: float + """ + + moustache: float = rest_field() + """A number ranging from 0 to 1 indicating a level of confidence associated with a property. + Required.""" + beard: float = rest_field() + """A number ranging from 0 to 1 indicating a level of confidence associated with a property. + Required.""" + sideburns: float = rest_field() + """A number ranging from 0 to 1 indicating a level of confidence associated with a property. + Required.""" + + @overload + def __init__( + self, + *, + moustache: float, + beard: float, + sideburns: float, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class HairColor(_model_base.Model): + """An array of candidate colors and confidence level in the presence of each. + + All required parameters must be populated in order to send to server. + + :ivar color: Name of the hair color. Required. Known values are: "unknown", "white", "gray", + "blond", "brown", "red", "black", and "other". + :vartype color: str or ~azure.ai.vision.face.models.HairColorType + :ivar confidence: Confidence level of the color. Range between [0,1]. Required. + :vartype confidence: float + """ + + color: Union[str, "_models.HairColorType"] = rest_field() + """Name of the hair color. Required. Known values are: \"unknown\", \"white\", \"gray\", + \"blond\", \"brown\", \"red\", \"black\", and \"other\".""" + confidence: float = rest_field() + """Confidence level of the color. Range between [0,1]. Required.""" + + @overload + def __init__( + self, + *, + color: Union[str, "_models.HairColorType"], + confidence: float, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class HairProperties(_model_base.Model): + """Properties describing hair attributes. + + All required parameters must be populated in order to send to server. + + :ivar bald: A number describing confidence level of whether the person is bald. Required. + :vartype bald: float + :ivar invisible: A boolean value describing whether the hair is visible in the image. Required. + :vartype invisible: bool + :ivar hair_color: An array of candidate colors and confidence level in the presence of each. + Required. + :vartype hair_color: list[~azure.ai.vision.face.models.HairColor] + """ + + bald: float = rest_field() + """A number describing confidence level of whether the person is bald. Required.""" + invisible: bool = rest_field() + """A boolean value describing whether the hair is visible in the image. Required.""" + hair_color: List["_models.HairColor"] = rest_field(name="hairColor") + """An array of candidate colors and confidence level in the presence of each. Required.""" + + @overload + def __init__( + self, + *, + bald: float, + invisible: bool, + hair_color: List["_models.HairColor"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class HeadPose(_model_base.Model): + """3-D roll/yaw/pitch angles for face direction. + + All required parameters must be populated in order to send to server. + + :ivar pitch: Value of angles. Required. + :vartype pitch: float + :ivar roll: Value of angles. Required. + :vartype roll: float + :ivar yaw: Value of angles. Required. 
+ :vartype yaw: float + """ + + pitch: float = rest_field() + """Value of angles. Required.""" + roll: float = rest_field() + """Value of angles. Required.""" + yaw: float = rest_field() + """Value of angles. Required.""" + + @overload + def __init__( + self, + *, + pitch: float, + roll: float, + yaw: float, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LandmarkCoordinate(_model_base.Model): + """Landmark coordinates within an image. + + All required parameters must be populated in order to send to server. + + :ivar x: The horizontal component, in pixels. Required. + :vartype x: float + :ivar y: The vertical component, in pixels. Required. + :vartype y: float + """ + + x: float = rest_field() + """The horizontal component, in pixels. Required.""" + y: float = rest_field() + """The vertical component, in pixels. Required.""" + + @overload + def __init__( + self, + *, + x: float, + y: float, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LargeFaceList(_model_base.Model): + """Large face list is a list of faces, up to 1,000,000 faces. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar name: User defined name, maximum length is 128. Required. + :vartype name: str + :ivar user_data: Optional user defined data. Length should not exceed 16K. 
+ :vartype user_data: str + :ivar recognition_model: Name of recognition model. Recognition model is used when the face + features are extracted and associated with detected faceIds. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". + :vartype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :ivar large_face_list_id: Valid character is letter in lower case or digit or '-' or '_', + maximum length is 64. Required. + :vartype large_face_list_id: str + """ + + name: str = rest_field() + """User defined name, maximum length is 128. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """Optional user defined data. Length should not exceed 16K.""" + recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = rest_field(name="recognitionModel") + """Name of recognition model. Recognition model is used when the face features are extracted and + associated with detected faceIds. Known values are: \"recognition_01\", \"recognition_02\", + \"recognition_03\", and \"recognition_04\".""" + large_face_list_id: str = rest_field(name="largeFaceListId", visibility=["read"]) + """Valid character is letter in lower case or digit or '-' or '_', maximum length is 64. Required.""" + + @overload + def __init__( + self, + *, + name: str, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LargeFaceListFace(_model_base.Model): + """Face resource for large face list. + + Readonly variables are only populated by the server, and will be ignored when sending a request. 
+ + All required parameters must be populated in order to send to server. + + :ivar persisted_face_id: Face ID of the face. Required. + :vartype persisted_face_id: str + :ivar user_data: User-provided data attached to the face. The length limit is 1K. + :vartype user_data: str + """ + + persisted_face_id: str = rest_field(name="persistedFaceId", visibility=["read"]) + """Face ID of the face. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """User-provided data attached to the face. The length limit is 1K.""" + + @overload + def __init__( + self, + *, + user_data: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LargePersonGroup(_model_base.Model): + """The container of the uploaded person data, including face recognition feature, and up to + 1,000,000 people. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar name: User defined name, maximum length is 128. Required. + :vartype name: str + :ivar user_data: Optional user defined data. Length should not exceed 16K. + :vartype user_data: str + :ivar recognition_model: Name of recognition model. Recognition model is used when the face + features are extracted and associated with detected faceIds. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". + :vartype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :ivar large_person_group_id: ID of the container. Required. + :vartype large_person_group_id: str + """ + + name: str = rest_field() + """User defined name, maximum length is 128. 
Required.""" + user_data: Optional[str] = rest_field(name="userData") + """Optional user defined data. Length should not exceed 16K.""" + recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = rest_field(name="recognitionModel") + """Name of recognition model. Recognition model is used when the face features are extracted and + associated with detected faceIds. Known values are: \"recognition_01\", \"recognition_02\", + \"recognition_03\", and \"recognition_04\".""" + large_person_group_id: str = rest_field(name="largePersonGroupId", visibility=["read"]) + """ID of the container. Required.""" + + @overload + def __init__( + self, + *, + name: str, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LargePersonGroupPerson(_model_base.Model): + """The person in a specified large person group. To add face to this person, please call "Add + Large Person Group Person Face". + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar person_id: ID of the person. Required. + :vartype person_id: str + :ivar name: User defined name, maximum length is 128. Required. + :vartype name: str + :ivar user_data: Optional user defined data. Length should not exceed 16K. + :vartype user_data: str + :ivar persisted_face_ids: Face ids of registered faces in the person. + :vartype persisted_face_ids: list[str] + """ + + person_id: str = rest_field(name="personId", visibility=["read"]) + """ID of the person. 
Required.""" + name: str = rest_field() + """User defined name, maximum length is 128. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """Optional user defined data. Length should not exceed 16K.""" + persisted_face_ids: Optional[List[str]] = rest_field(name="persistedFaceIds") + """Face ids of registered faces in the person.""" + + @overload + def __init__( + self, + *, + name: str, + user_data: Optional[str] = None, + persisted_face_ids: Optional[List[str]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LargePersonGroupPersonFace(_model_base.Model): + """Face resource for large person group person. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar persisted_face_id: Face ID of the face. Required. + :vartype persisted_face_id: str + :ivar user_data: User-provided data attached to the face. The length limit is 1K. + :vartype user_data: str + """ + + persisted_face_id: str = rest_field(name="persistedFaceId", visibility=["read"]) + """Face ID of the face. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """User-provided data attached to the face. The length limit is 1K.""" + + @overload + def __init__( + self, + *, + user_data: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ListFaceResult(_model_base.Model): + """Response of list face of person. + + All required parameters must be populated in order to send to server. + + :ivar person_id: Id of person. Required. + :vartype person_id: str + :ivar persisted_face_ids: Array of persisted face ids. Required. + :vartype persisted_face_ids: list[str] + """ + + person_id: str = rest_field(name="personId") + """Id of person. Required.""" + persisted_face_ids: List[str] = rest_field(name="persistedFaceIds") + """Array of persisted face ids. Required.""" + + @overload + def __init__( + self, + *, + person_id: str, + persisted_face_ids: List[str], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ListGroupReferenceResult(_model_base.Model): + """Response of list dynamic person group of person. + + All required parameters must be populated in order to send to server. + + :ivar dynamic_person_group_ids: Array of PersonDirectory DynamicPersonGroup ids. Required. + :vartype dynamic_person_group_ids: list[str] + """ + + dynamic_person_group_ids: List[str] = rest_field(name="dynamicPersonGroupIds") + """Array of PersonDirectory DynamicPersonGroup ids. Required.""" + + @overload + def __init__( + self, + *, + dynamic_person_group_ids: List[str], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ListPersonResult(_model_base.Model): + """Response of list dynamic person group person. + + All required parameters must be populated in order to send to server. + + :ivar person_ids: Array of PersonDirectory Person ids. Required. + :vartype person_ids: list[str] + """ + + person_ids: List[str] = rest_field(name="personIds") + """Array of PersonDirectory Person ids. Required.""" + + @overload + def __init__( + self, + *, + person_ids: List[str], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LivenessOutputsTarget(_model_base.Model): + """The liveness classification for target face. + + All required parameters must be populated in order to send to server. + + :ivar face_rectangle: The face region where the liveness classification was made on. Required. + :vartype face_rectangle: ~azure.ai.vision.face.models.FaceRectangle + :ivar file_name: The file name which contains the face rectangle where the liveness + classification was made on. Required. + :vartype file_name: str + :ivar time_offset_within_file: The time offset within the file of the frame which contains the + face rectangle where the liveness classification was made on. Required. + :vartype time_offset_within_file: int + :ivar image_type: The image type which contains the face rectangle where the liveness + classification was made on. Required. Known values are: "Color", "Infrared", and "Depth". 
+ :vartype image_type: str or ~azure.ai.vision.face.models.FaceImageType + """ + + face_rectangle: "_models.FaceRectangle" = rest_field(name="faceRectangle") + """The face region where the liveness classification was made on. Required.""" + file_name: str = rest_field(name="fileName") + """The file name which contains the face rectangle where the liveness classification was made on. + Required.""" + time_offset_within_file: int = rest_field(name="timeOffsetWithinFile") + """The time offset within the file of the frame which contains the face rectangle where the + liveness classification was made on. Required.""" + image_type: Union[str, "_models.FaceImageType"] = rest_field(name="imageType") + """The image type which contains the face rectangle where the liveness classification was made on. + Required. Known values are: \"Color\", \"Infrared\", and \"Depth\".""" + + @overload + def __init__( + self, + *, + face_rectangle: "_models.FaceRectangle", + file_name: str, + time_offset_within_file: int, + image_type: Union[str, "_models.FaceImageType"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LivenessResponseBody(_model_base.Model): + """The response body of detect liveness API call. + + :ivar liveness_decision: The liveness classification for the target face. Known values are: + "uncertain", "realface", and "spoofface". + :vartype liveness_decision: str or ~azure.ai.vision.face.models.FaceLivenessDecision + :ivar target: Specific targets used for liveness classification. + :vartype target: ~azure.ai.vision.face.models.LivenessOutputsTarget + :ivar model_version_used: The model version used for liveness classification. 
Known values are: + "2020-02-15-preview.01", "2021-11-12-preview.03", "2022-10-15-preview.04", and + "2023-03-02-preview.05". + :vartype model_version_used: str or ~azure.ai.vision.face.models.LivenessModel + :ivar verify_result: The face verification output. Only available when the request is liveness + with verify. + :vartype verify_result: ~azure.ai.vision.face.models.LivenessWithVerifyOutputs + """ + + liveness_decision: Optional[Union[str, "_models.FaceLivenessDecision"]] = rest_field(name="livenessDecision") + """The liveness classification for the target face. Known values are: \"uncertain\", \"realface\", + and \"spoofface\".""" + target: Optional["_models.LivenessOutputsTarget"] = rest_field() + """Specific targets used for liveness classification.""" + model_version_used: Optional[Union[str, "_models.LivenessModel"]] = rest_field(name="modelVersionUsed") + """The model version used for liveness classification. Known values are: + \"2020-02-15-preview.01\", \"2021-11-12-preview.03\", \"2022-10-15-preview.04\", and + \"2023-03-02-preview.05\".""" + verify_result: Optional["_models.LivenessWithVerifyOutputs"] = rest_field(name="verifyResult") + """The face verification output. Only available when the request is liveness with verify.""" + + @overload + def __init__( + self, + *, + liveness_decision: Optional[Union[str, "_models.FaceLivenessDecision"]] = None, + target: Optional["_models.LivenessOutputsTarget"] = None, + model_version_used: Optional[Union[str, "_models.LivenessModel"]] = None, + verify_result: Optional["_models.LivenessWithVerifyOutputs"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LivenessSession(_model_base.Model): + """Session result of detect liveness. 
+ + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar id: The unique ID to reference this session. Required. + :vartype id: str + :ivar created_date_time: DateTime when this session was created. Required. + :vartype created_date_time: ~datetime.datetime + :ivar session_start_date_time: DateTime when this session was started by the client. + :vartype session_start_date_time: ~datetime.datetime + :ivar session_expired: Whether or not the session is expired. Required. + :vartype session_expired: bool + :ivar device_correlation_id: Unique Guid per each end-user device. This is to provide rate + limiting and anti-hammering. If 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + :vartype device_correlation_id: str + :ivar auth_token_time_to_live_in_seconds: Seconds the session should last for. Range is 60 to + 86400 seconds. Default value is 600. + :vartype auth_token_time_to_live_in_seconds: int + :ivar status: The current status of the session. Required. Known values are: "NotStarted", + "Started", and "ResultAvailable". + :vartype status: str or ~azure.ai.vision.face.models.FaceSessionStatus + :ivar result: The latest session audit result only populated if status == 'ResultAvailable'. + :vartype result: ~azure.ai.vision.face.models.LivenessSessionAuditEntry + """ + + id: str = rest_field(visibility=["read"]) + """The unique ID to reference this session. Required.""" + created_date_time: datetime.datetime = rest_field(name="createdDateTime", format="rfc3339") + """DateTime when this session was created. 
Required.""" + session_start_date_time: Optional[datetime.datetime] = rest_field(name="sessionStartDateTime", format="rfc3339") + """DateTime when this session was started by the client.""" + session_expired: bool = rest_field(name="sessionExpired") + """Whether or not the session is expired. Required.""" + device_correlation_id: Optional[str] = rest_field(name="deviceCorrelationId") + """Unique Guid per each end-user device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this 'deviceCorrelationId' must be + null.""" + auth_token_time_to_live_in_seconds: Optional[int] = rest_field(name="authTokenTimeToLiveInSeconds") + """Seconds the session should last for. Range is 60 to 86400 seconds. Default value is 600.""" + status: Union[str, "_models.FaceSessionStatus"] = rest_field() + """The current status of the session. Required. Known values are: \"NotStarted\", \"Started\", and + \"ResultAvailable\".""" + result: Optional["_models.LivenessSessionAuditEntry"] = rest_field() + """The latest session audit result only populated if status == 'ResultAvailable'.""" + + @overload + def __init__( + self, + *, + created_date_time: datetime.datetime, + session_expired: bool, + status: Union[str, "_models.FaceSessionStatus"], + session_start_date_time: Optional[datetime.datetime] = None, + device_correlation_id: Optional[str] = None, + auth_token_time_to_live_in_seconds: Optional[int] = None, + result: Optional["_models.LivenessSessionAuditEntry"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LivenessSessionAuditEntry(_model_base.Model): + """Audit entry for a request in session. 
+ + All required parameters must be populated in order to send to server. + + :ivar id: The unique id to refer to this audit request. Use this id with the 'start' query + parameter to continue on to the next page of audit results. Required. + :vartype id: int + :ivar session_id: The unique sessionId of the created session. It will expire 48 hours after it + was created or may be deleted sooner using the corresponding session DELETE operation. + Required. + :vartype session_id: str + :ivar request_id: The unique requestId that is returned by the service to the client in the + 'apim-request-id' header. Required. + :vartype request_id: str + :ivar client_request_id: The unique clientRequestId that is sent by the client in the + 'client-request-id' header. Required. + :vartype client_request_id: str + :ivar received_date_time: The UTC DateTime that the request was received. Required. + :vartype received_date_time: ~datetime.datetime + :ivar request: The request of this entry. Required. + :vartype request: ~azure.ai.vision.face.models.AuditRequestInfo + :ivar response: The response of this entry. Required. + :vartype response: ~azure.ai.vision.face.models.AuditLivenessResponseInfo + :ivar digest: The server calculated digest for this request. If the client reported digest + differs from the server calculated digest, then the message integrity between the client and + service has been compromised and the result should not be trusted. For more information, see + how to guides on how to leverage this value to secure your end-to-end solution. Required. + :vartype digest: str + """ + + id: int = rest_field() + """The unique id to refer to this audit request. Use this id with the 'start' query parameter to + continue on to the next page of audit results. Required.""" + session_id: str = rest_field(name="sessionId") + """The unique sessionId of the created session. 
It will expire 48 hours after it was created or + may be deleted sooner using the corresponding session DELETE operation. Required.""" + request_id: str = rest_field(name="requestId") + """The unique requestId that is returned by the service to the client in the 'apim-request-id' + header. Required.""" + client_request_id: str = rest_field(name="clientRequestId") + """The unique clientRequestId that is sent by the client in the 'client-request-id' header. + Required.""" + received_date_time: datetime.datetime = rest_field(name="receivedDateTime", format="rfc3339") + """The UTC DateTime that the request was received. Required.""" + request: "_models.AuditRequestInfo" = rest_field() + """The request of this entry. Required.""" + response: "_models.AuditLivenessResponseInfo" = rest_field() + """The response of this entry. Required.""" + digest: str = rest_field() + """The server calculated digest for this request. If the client reported digest differs from the + server calculated digest, then the message integrity between the client and service has been + compromised and the result should not be trusted. For more information, see how to guides on + how to leverage this value to secure your end-to-end solution. Required.""" + + @overload + def __init__( + self, + *, + id: int, # pylint: disable=redefined-builtin + session_id: str, + request_id: str, + client_request_id: str, + received_date_time: datetime.datetime, + request: "_models.AuditRequestInfo", + response: "_models.AuditLivenessResponseInfo", + digest: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LivenessSessionItem(_model_base.Model): + """Session data returned for enumeration. 
+ + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar id: The unique ID to reference this session. Required. + :vartype id: str + :ivar created_date_time: DateTime when this session was created. Required. + :vartype created_date_time: ~datetime.datetime + :ivar session_start_date_time: DateTime when this session was started by the client. + :vartype session_start_date_time: ~datetime.datetime + :ivar session_expired: Whether or not the session is expired. Required. + :vartype session_expired: bool + :ivar device_correlation_id: Unique Guid per each end-user device. This is to provide rate + limiting and anti-hammering. If 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + :vartype device_correlation_id: str + :ivar auth_token_time_to_live_in_seconds: Seconds the session should last for. Range is 60 to + 86400 seconds. Default value is 600. + :vartype auth_token_time_to_live_in_seconds: int + """ + + id: str = rest_field(visibility=["read"]) + """The unique ID to reference this session. Required.""" + created_date_time: datetime.datetime = rest_field(name="createdDateTime", format="rfc3339") + """DateTime when this session was created. Required.""" + session_start_date_time: Optional[datetime.datetime] = rest_field(name="sessionStartDateTime", format="rfc3339") + """DateTime when this session was started by the client.""" + session_expired: bool = rest_field(name="sessionExpired") + """Whether or not the session is expired. Required.""" + device_correlation_id: Optional[str] = rest_field(name="deviceCorrelationId") + """Unique Guid per each end-user device. This is to provide rate limiting and anti-hammering. 
If + 'deviceCorrelationIdSetInClient' is true in this request, this 'deviceCorrelationId' must be + null.""" + auth_token_time_to_live_in_seconds: Optional[int] = rest_field(name="authTokenTimeToLiveInSeconds") + """Seconds the session should last for. Range is 60 to 86400 seconds. Default value is 600.""" + + @overload + def __init__( + self, + *, + created_date_time: datetime.datetime, + session_expired: bool, + session_start_date_time: Optional[datetime.datetime] = None, + device_correlation_id: Optional[str] = None, + auth_token_time_to_live_in_seconds: Optional[int] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LivenessWithVerifyImage(_model_base.Model): + """The detail of face for verification. + + All required parameters must be populated in order to send to server. + + :ivar face_rectangle: The face region where the comparison image's classification was made. + Required. + :vartype face_rectangle: ~azure.ai.vision.face.models.FaceRectangle + :ivar quality_for_recognition: Quality of face image for recognition. Required. Known values + are: "low", "medium", and "high". + :vartype quality_for_recognition: str or ~azure.ai.vision.face.models.QualityForRecognition + """ + + face_rectangle: "_models.FaceRectangle" = rest_field(name="faceRectangle") + """The face region where the comparison image's classification was made. Required.""" + quality_for_recognition: Union[str, "_models.QualityForRecognition"] = rest_field(name="qualityForRecognition") + """Quality of face image for recognition. Required. 
Known values are: \"low\", \"medium\", and + \"high\".""" + + @overload + def __init__( + self, + *, + face_rectangle: "_models.FaceRectangle", + quality_for_recognition: Union[str, "_models.QualityForRecognition"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LivenessWithVerifyOutputs(_model_base.Model): + """The face verification output. + + All required parameters must be populated in order to send to server. + + :ivar verify_image: The detail of face for verification. Required. + :vartype verify_image: ~azure.ai.vision.face.models.LivenessWithVerifyImage + :ivar match_confidence: The target face liveness face and comparison image face verification + confidence. Required. + :vartype match_confidence: float + :ivar is_identical: Whether the target liveness face and comparison image face match. Required. + :vartype is_identical: bool + """ + + verify_image: "_models.LivenessWithVerifyImage" = rest_field(name="verifyImage") + """The detail of face for verification. Required.""" + match_confidence: float = rest_field(name="matchConfidence") + """The target face liveness face and comparison image face verification confidence. Required.""" + is_identical: bool = rest_field(name="isIdentical") + """Whether the target liveness face and comparison image face match. Required.""" + + @overload + def __init__( + self, + *, + verify_image: "_models.LivenessWithVerifyImage", + match_confidence: float, + is_identical: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class LivenessWithVerifySession(_model_base.Model): + """Session result of detect liveness with verify. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar id: The unique ID to reference this session. Required. + :vartype id: str + :ivar created_date_time: DateTime when this session was created. Required. + :vartype created_date_time: ~datetime.datetime + :ivar session_start_date_time: DateTime when this session was started by the client. + :vartype session_start_date_time: ~datetime.datetime + :ivar session_expired: Whether or not the session is expired. Required. + :vartype session_expired: bool + :ivar device_correlation_id: Unique Guid per each end-user device. This is to provide rate + limiting and anti-hammering. If 'deviceCorrelationIdSetInClient' is true in this request, this + 'deviceCorrelationId' must be null. + :vartype device_correlation_id: str + :ivar auth_token_time_to_live_in_seconds: Seconds the session should last for. Range is 60 to + 86400 seconds. Default value is 600. + :vartype auth_token_time_to_live_in_seconds: int + :ivar status: The current status of the session. Required. Known values are: "NotStarted", + "Started", and "ResultAvailable". + :vartype status: str or ~azure.ai.vision.face.models.FaceSessionStatus + :ivar result: The latest session audit result only populated if status == 'ResultAvailable'. + :vartype result: ~azure.ai.vision.face.models.LivenessSessionAuditEntry + """ + + id: str = rest_field(visibility=["read"]) + """The unique ID to reference this session. Required.""" + created_date_time: datetime.datetime = rest_field(name="createdDateTime", format="rfc3339") + """DateTime when this session was created. 
Required.""" + session_start_date_time: Optional[datetime.datetime] = rest_field(name="sessionStartDateTime", format="rfc3339") + """DateTime when this session was started by the client.""" + session_expired: bool = rest_field(name="sessionExpired") + """Whether or not the session is expired. Required.""" + device_correlation_id: Optional[str] = rest_field(name="deviceCorrelationId") + """Unique Guid per each end-user device. This is to provide rate limiting and anti-hammering. If + 'deviceCorrelationIdSetInClient' is true in this request, this 'deviceCorrelationId' must be + null.""" + auth_token_time_to_live_in_seconds: Optional[int] = rest_field(name="authTokenTimeToLiveInSeconds") + """Seconds the session should last for. Range is 60 to 86400 seconds. Default value is 600.""" + status: Union[str, "_models.FaceSessionStatus"] = rest_field() + """The current status of the session. Required. Known values are: \"NotStarted\", \"Started\", and + \"ResultAvailable\".""" + result: Optional["_models.LivenessSessionAuditEntry"] = rest_field() + """The latest session audit result only populated if status == 'ResultAvailable'.""" + + @overload + def __init__( + self, + *, + created_date_time: datetime.datetime, + session_expired: bool, + status: Union[str, "_models.FaceSessionStatus"], + session_start_date_time: Optional[datetime.datetime] = None, + device_correlation_id: Optional[str] = None, + auth_token_time_to_live_in_seconds: Optional[int] = None, + result: Optional["_models.LivenessSessionAuditEntry"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class MaskProperties(_model_base.Model): + """Properties describing the presence of a mask on a given face. 
+ + All required parameters must be populated in order to send to server. + + :ivar nose_and_mouth_covered: A boolean value indicating whether nose and mouth are covered. + Required. + :vartype nose_and_mouth_covered: bool + :ivar type: Type of the mask. Required. Known values are: "faceMask", "noMask", + "otherMaskOrOcclusion", and "uncertain". + :vartype type: str or ~azure.ai.vision.face.models.MaskType + """ + + nose_and_mouth_covered: bool = rest_field(name="noseAndMouthCovered") + """A boolean value indicating whether nose and mouth are covered. Required.""" + type: Union[str, "_models.MaskType"] = rest_field() + """Type of the mask. Required. Known values are: \"faceMask\", \"noMask\", + \"otherMaskOrOcclusion\", and \"uncertain\".""" + + @overload + def __init__( + self, + *, + nose_and_mouth_covered: bool, + type: Union[str, "_models.MaskType"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class NoiseProperties(_model_base.Model): + """Properties describing noise level of the image. + + All required parameters must be populated in order to send to server. + + :ivar noise_level: An enum value indicating level of noise. Required. Known values are: "low", + "medium", and "high". + :vartype noise_level: str or ~azure.ai.vision.face.models.NoiseLevel + :ivar value: A number indicating level of noise level ranging from 0 to 1. [0, 0.25) is under + exposure. [0.25, 0.75) is good exposure. [0.75, 1] is over exposure. [0, 0.3) is low noise + level. [0.3, 0.7) is medium noise level. [0.7, 1] is high noise level. Required. + :vartype value: float + """ + + noise_level: Union[str, "_models.NoiseLevel"] = rest_field(name="noiseLevel") + """An enum value indicating level of noise. Required. 
Known values are: \"low\", \"medium\", and + \"high\".""" + value: float = rest_field() + """A number indicating level of noise level ranging from 0 to 1. [0, 0.25) is under exposure. + [0.25, 0.75) is good exposure. [0.75, 1] is over exposure. [0, 0.3) is low noise level. [0.3, + 0.7) is medium noise level. [0.7, 1] is high noise level. Required.""" + + @overload + def __init__( + self, + *, + noise_level: Union[str, "_models.NoiseLevel"], + value: float, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class OcclusionProperties(_model_base.Model): + """Properties describing occlusions on a given face. + + All required parameters must be populated in order to send to server. + + :ivar forehead_occluded: A boolean value indicating whether forehead is occluded. Required. + :vartype forehead_occluded: bool + :ivar eye_occluded: A boolean value indicating whether eyes are occluded. Required. + :vartype eye_occluded: bool + :ivar mouth_occluded: A boolean value indicating whether the mouth is occluded. Required. + :vartype mouth_occluded: bool + """ + + forehead_occluded: bool = rest_field(name="foreheadOccluded") + """A boolean value indicating whether forehead is occluded. Required.""" + eye_occluded: bool = rest_field(name="eyeOccluded") + """A boolean value indicating whether eyes are occluded. Required.""" + mouth_occluded: bool = rest_field(name="mouthOccluded") + """A boolean value indicating whether the mouth is occluded. Required.""" + + @overload + def __init__( + self, + *, + forehead_occluded: bool, + eye_occluded: bool, + mouth_occluded: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class PersonDirectoryFace(_model_base.Model): + """Face resource for person directory person. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar persisted_face_id: Face ID of the face. Required. + :vartype persisted_face_id: str + :ivar user_data: User-provided data attached to the face. The length limit is 1K. + :vartype user_data: str + """ + + persisted_face_id: str = rest_field(name="persistedFaceId", visibility=["read"]) + """Face ID of the face. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """User-provided data attached to the face. The length limit is 1K.""" + + @overload + def __init__( + self, + *, + user_data: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class PersonDirectoryPerson(_model_base.Model): + """Person resource for person directory. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar person_id: Person ID of the person. Required. + :vartype person_id: str + :ivar name: User defined name, maximum length is 128. Required. + :vartype name: str + :ivar user_data: Optional user defined data. Length should not exceed 16K. + :vartype user_data: str + """ + + person_id: str = rest_field(name="personId", visibility=["read"]) + """Person ID of the person. 
Required.""" + name: str = rest_field() + """User defined name, maximum length is 128. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """Optional user defined data. Length should not exceed 16K.""" + + @overload + def __init__( + self, + *, + name: str, + user_data: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class PersonGroup(_model_base.Model): + """The container of the uploaded person data, including face recognition feature, and up to 10,000 + persons. To handle larger scale face identification problem, please consider using Large Person + Group. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar name: User defined name, maximum length is 128. Required. + :vartype name: str + :ivar user_data: Optional user defined data. Length should not exceed 16K. + :vartype user_data: str + :ivar recognition_model: Name of recognition model. Recognition model is used when the face + features are extracted and associated with detected faceIds. Known values are: + "recognition_01", "recognition_02", "recognition_03", and "recognition_04". + :vartype recognition_model: str or ~azure.ai.vision.face.models.FaceRecognitionModel + :ivar person_group_id: ID of the container. Required. + :vartype person_group_id: str + """ + + name: str = rest_field() + """User defined name, maximum length is 128. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """Optional user defined data. 
Length should not exceed 16K.""" + recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = rest_field(name="recognitionModel") + """Name of recognition model. Recognition model is used when the face features are extracted and + associated with detected faceIds. Known values are: \"recognition_01\", \"recognition_02\", + \"recognition_03\", and \"recognition_04\".""" + person_group_id: str = rest_field(name="personGroupId", visibility=["read"]) + """ID of the container. Required.""" + + @overload + def __init__( + self, + *, + name: str, + user_data: Optional[str] = None, + recognition_model: Optional[Union[str, "_models.FaceRecognitionModel"]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class PersonGroupPerson(_model_base.Model): + """The person in a specified person group. To add face to this person, please call "Add Large + Person Group Person Face". + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar person_id: ID of the person. Required. + :vartype person_id: str + :ivar name: User defined name, maximum length is 128. Required. + :vartype name: str + :ivar user_data: Optional user defined data. Length should not exceed 16K. + :vartype user_data: str + :ivar persisted_face_ids: Face ids of registered faces in the person. + :vartype persisted_face_ids: list[str] + """ + + person_id: str = rest_field(name="personId", visibility=["read"]) + """ID of the person. Required.""" + name: str = rest_field() + """User defined name, maximum length is 128. 
Required.""" + user_data: Optional[str] = rest_field(name="userData") + """Optional user defined data. Length should not exceed 16K.""" + persisted_face_ids: Optional[List[str]] = rest_field(name="persistedFaceIds") + """Face ids of registered faces in the person.""" + + @overload + def __init__( + self, + *, + name: str, + user_data: Optional[str] = None, + persisted_face_ids: Optional[List[str]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class PersonGroupPersonFace(_model_base.Model): + """Face resource for person group person. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to server. + + :ivar persisted_face_id: Face ID of the face. Required. + :vartype persisted_face_id: str + :ivar user_data: User-provided data attached to the face. The length limit is 1K. + :vartype user_data: str + """ + + persisted_face_id: str = rest_field(name="persistedFaceId", visibility=["read"]) + """Face ID of the face. Required.""" + user_data: Optional[str] = rest_field(name="userData") + """User-provided data attached to the face. The length limit is 1K.""" + + @overload + def __init__( + self, + *, + user_data: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_patch.py b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/models/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/face/azure-ai-vision-face/azure/ai/vision/face/py.typed b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/azure/ai/vision/face/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. 
\ No newline at end of file diff --git a/sdk/face/azure-ai-vision-face/dev_requirements.txt b/sdk/face/azure-ai-vision-face/dev_requirements.txt new file mode 100644 index 000000000000..105486471444 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/dev_requirements.txt @@ -0,0 +1,3 @@ +-e ../../../tools/azure-sdk-tools +../../core/azure-core +aiohttp \ No newline at end of file diff --git a/sdk/face/azure-ai-vision-face/sdk_packaging.toml b/sdk/face/azure-ai-vision-face/sdk_packaging.toml new file mode 100644 index 000000000000..e7687fdae93b --- /dev/null +++ b/sdk/face/azure-ai-vision-face/sdk_packaging.toml @@ -0,0 +1,2 @@ +[packaging] +auto_update = false \ No newline at end of file diff --git a/sdk/face/azure-ai-vision-face/setup.py b/sdk/face/azure-ai-vision-face/setup.py new file mode 100644 index 000000000000..85c0e4ceb836 --- /dev/null +++ b/sdk/face/azure-ai-vision-face/setup.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# coding: utf-8 + +import os +import re +from setuptools import setup, find_packages + + +PACKAGE_NAME = "azure-ai-vision-face" +PACKAGE_PPRINT_NAME = "Azure Ai Vision Face" + +# a-b-c => a/b/c +package_folder_path = PACKAGE_NAME.replace("-", "/") + +# Version extraction inspired from 'requests' +with open(os.path.join(package_folder_path, "_version.py"), "r") as fd: + version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError("Cannot find version information") + + +setup( + name=PACKAGE_NAME, + version=version, + description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), + long_description=open("README.md", "r").read(), + long_description_content_type="text/markdown", + license="MIT License", + author="Microsoft Corporation", + author_email="azpysdkhelp@microsoft.com", + url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", + keywords="azure, azure sdk", + classifiers=[ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "License :: OSI Approved :: MIT License", + ], + zip_safe=False, + packages=find_packages( + exclude=[ + "tests", + # Exclude packages that will be covered by PEP420 or nspkg + "azure", + "azure.ai", + "azure.ai.vision", + ] + ), + include_package_data=True, + package_data={ + "azure.ai.vision.face": ["py.typed"], + }, + install_requires=[ + "isodate>=0.6.1", + "azure-core>=1.30.0", + "typing-extensions>=4.6.0", + ], + python_requires=">=3.8", +) diff --git a/sdk/face/azure-ai-vision-face/tsp-location.yaml 
b/sdk/face/azure-ai-vision-face/tsp-location.yaml new file mode 100644 index 000000000000..e64b5978f87a --- /dev/null +++ b/sdk/face/azure-ai-vision-face/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/ai/Face +commit: 37acfe2967e5e1be1169146ac461eb1875c9476e +repo: Azure/azure-rest-api-specs +additionalDirectories: diff --git a/sdk/face/ci.yml b/sdk/face/ci.yml new file mode 100644 index 000000000000..66a24e07a1d8 --- /dev/null +++ b/sdk/face/ci.yml @@ -0,0 +1,34 @@ +# DO NOT EDIT THIS FILE +# This file is generated automatically and any changes will be lost. + +trigger: + branches: + include: + - main + - hotfix/* + - release/* + - restapi* + paths: + include: + - sdk/face/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + - restapi* + paths: + include: + - sdk/face/ + +extends: + template: ../../eng/pipelines/templates/stages/archetype-sdk-client.yml + parameters: + ServiceDirectory: face + TestProxy: true + Artifacts: + - name: azure-ai-vision-face + safeName: azureaivisionface