diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index 2e893be9751ee..bb7eade6204ec 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -28,12 +28,13 @@ __version__ = VERSION +from ._credential import SearchApiKeyCredential + from ._index import ( AutocompleteQuery, IndexAction, IndexDocumentsBatch, IndexingResult, - SearchApiKeyCredential, SearchIndexClient, SearchQuery, SearchItemPaged, @@ -41,6 +42,8 @@ odata, ) +from ._service import SearchServiceClient + __all__ = ( "AutocompleteQuery", "IndexAction", @@ -50,6 +53,7 @@ "SearchIndexClient", "SearchItemPaged", "SearchQuery", + "SearchServiceClient", "SuggestQuery", "odata", ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_credential.py b/sdk/search/azure-search-documents/azure/search/documents/_credential.py similarity index 100% rename from sdk/search/azure-search-documents/azure/search/documents/_index/_credential.py rename to sdk/search/azure-search-documents/azure/search/documents/_credential.py diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/__init__.py index 05f7ebfa87776..d43ed7e458174 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/__init__.py @@ -2,7 +2,6 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
# ------------------------------------ -from ._credential import SearchApiKeyCredential # pylint: disable=unused-import from ._index_documents_batch import IndexDocumentsBatch # pylint: disable=unused-import from ._search_index_client import ( # pylint: disable=unused-import odata, diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_search_index_client.py index c7dd7997f1e98..48fe30ab96cc9 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_search_index_client.py @@ -19,7 +19,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports from typing import Any, Union - from ._credential import SearchApiKeyCredential + from .. import SearchApiKeyCredential def odata(statement, **kwargs): diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/aio/_search_index_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/_index/aio/_search_index_client_async.py index d4f563d29978a..fe447123d6cfd 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/aio/_search_index_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/aio/_search_index_client_async.py @@ -19,7 +19,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports from typing import Any, Union - from .._credential import SearchApiKeyCredential + from ... import SearchApiKeyCredential class SearchIndexClient(object): @@ -33,7 +33,6 @@ class SearchIndexClient(object): :type credential: SearchApiKeyCredential .. admonition:: Example: - .. 
literalinclude:: ../samples/async_samples/sample_authentication_async.py :start-after: [START create_search_client_with_key_async] :end-before: [END create_search_client_with_key_async] diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/__init__.py new file mode 100644 index 0000000000000..eca411e6a05ab --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/__init__.py @@ -0,0 +1,5 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +from ._search_service_client import SearchServiceClient # pylint: disable=unused-import diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py new file mode 100644 index 0000000000000..4e8378902f12f --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._search_service_client import SearchServiceClient +__all__ = ['SearchServiceClient'] diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py new file mode 100644 index 0000000000000..28368a23abde1 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +VERSION = "unknown" + +class SearchServiceClientConfiguration(Configuration): + """Configuration for SearchServiceClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: The endpoint URL of the search service. + :type endpoint: str + """ + + def __init__( + self, + endpoint, # type: str + **kwargs # type: Any + ): + # type: (...) -> None + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + super(SearchServiceClientConfiguration, self).__init__(**kwargs) + + self.endpoint = endpoint + self.api_version = "2019-05-06-Preview" + kwargs.setdefault('sdk_moniker', 'searchserviceclient/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs # type: Any + ): + # type: (...) 
-> None + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py new file mode 100644 index 0000000000000..f466d50bb2aae --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core import PipelineClient +from msrest import Deserializer, Serializer + +from ._configuration import SearchServiceClientConfiguration +from .operations import DataSourcesOperations +from .operations import IndexersOperations +from .operations import SkillsetsOperations +from .operations import SynonymMapsOperations +from .operations import IndexesOperations +from .operations import SearchServiceClientOperationsMixin +from . 
import models + + +class SearchServiceClient(SearchServiceClientOperationsMixin): + """Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. + + :ivar data_sources: DataSourcesOperations operations + :vartype data_sources: search_service_client.operations.DataSourcesOperations + :ivar indexers: IndexersOperations operations + :vartype indexers: search_service_client.operations.IndexersOperations + :ivar skillsets: SkillsetsOperations operations + :vartype skillsets: search_service_client.operations.SkillsetsOperations + :ivar synonym_maps: SynonymMapsOperations operations + :vartype synonym_maps: search_service_client.operations.SynonymMapsOperations + :ivar indexes: IndexesOperations operations + :vartype indexes: search_service_client.operations.IndexesOperations + :param endpoint: The endpoint URL of the search service. + :type endpoint: str + """ + + def __init__( + self, + endpoint, # type: str + **kwargs # type: Any + ): + # type: (...) 
-> None + base_url = '{endpoint}' + self._config = SearchServiceClientConfiguration(endpoint, **kwargs) + self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.data_sources = DataSourcesOperations( + self._client, self._config, self._serialize, self._deserialize) + self.indexers = IndexersOperations( + self._client, self._config, self._serialize, self._deserialize) + self.skillsets = SkillsetsOperations( + self._client, self._config, self._serialize, self._deserialize) + self.synonym_maps = SynonymMapsOperations( + self._client, self._config, self._serialize, self._deserialize) + self.indexes = IndexesOperations( + self._client, self._config, self._serialize, self._deserialize) + + def close(self): + # type: () -> None + self._client.close() + + def __enter__(self): + # type: () -> SearchServiceClient + self._client.__enter__() + return self + + def __exit__(self, *exc_details): + # type: (Any) -> None + self._client.__exit__(*exc_details) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py new file mode 100644 index 0000000000000..6ffdee2181084 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._search_service_client_async import SearchServiceClient +__all__ = ['SearchServiceClient'] diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py new file mode 100644 index 0000000000000..022214a557bb5 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +VERSION = "unknown" + +class SearchServiceClientConfiguration(Configuration): + """Configuration for SearchServiceClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: The endpoint URL of the search service. 
+ :type endpoint: str + """ + + def __init__( + self, + endpoint: str, + **kwargs: Any + ) -> None: + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + super(SearchServiceClientConfiguration, self).__init__(**kwargs) + + self.endpoint = endpoint + self.api_version = "2019-05-06-Preview" + kwargs.setdefault('sdk_moniker', 'searchserviceclient/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ) -> None: + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py new file mode 100644 index 0000000000000..11b0ab3a5fdff --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core import AsyncPipelineClient +from msrest import Deserializer, Serializer + +from ._configuration_async import SearchServiceClientConfiguration +from .operations_async import DataSourcesOperations +from .operations_async import IndexersOperations +from .operations_async import SkillsetsOperations +from .operations_async import SynonymMapsOperations +from .operations_async import IndexesOperations +from .operations_async import SearchServiceClientOperationsMixin +from .. import models + + +class SearchServiceClient(SearchServiceClientOperationsMixin): + """Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. + + :ivar data_sources: DataSourcesOperations operations + :vartype data_sources: search_service_client.aio.operations_async.DataSourcesOperations + :ivar indexers: IndexersOperations operations + :vartype indexers: search_service_client.aio.operations_async.IndexersOperations + :ivar skillsets: SkillsetsOperations operations + :vartype skillsets: search_service_client.aio.operations_async.SkillsetsOperations + :ivar synonym_maps: SynonymMapsOperations operations + :vartype synonym_maps: search_service_client.aio.operations_async.SynonymMapsOperations + :ivar indexes: IndexesOperations operations + :vartype indexes: search_service_client.aio.operations_async.IndexesOperations + :param endpoint: The endpoint URL of the search service. 
+ :type endpoint: str + """ + + def __init__( + self, + endpoint: str, + **kwargs: Any + ) -> None: + base_url = '{endpoint}' + self._config = SearchServiceClientConfiguration(endpoint, **kwargs) + self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.data_sources = DataSourcesOperations( + self._client, self._config, self._serialize, self._deserialize) + self.indexers = IndexersOperations( + self._client, self._config, self._serialize, self._deserialize) + self.skillsets = SkillsetsOperations( + self._client, self._config, self._serialize, self._deserialize) + self.synonym_maps = SynonymMapsOperations( + self._client, self._config, self._serialize, self._deserialize) + self.indexes = IndexesOperations( + self._client, self._config, self._serialize, self._deserialize) + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "SearchServiceClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py new file mode 100644 index 0000000000000..a9e96c7654984 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._data_sources_operations_async import DataSourcesOperations +from ._indexers_operations_async import IndexersOperations +from ._skillsets_operations_async import SkillsetsOperations +from ._synonym_maps_operations_async import SynonymMapsOperations +from ._indexes_operations_async import IndexesOperations +from ._search_service_client_operations_async import SearchServiceClientOperationsMixin + +__all__ = [ + 'DataSourcesOperations', + 'IndexersOperations', + 'SkillsetsOperations', + 'SynonymMapsOperations', + 'IndexesOperations', + 'SearchServiceClientOperationsMixin', +] diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py new file mode 100644 index 0000000000000..431a492fd9586 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py @@ -0,0 +1,385 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... 
import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class DataSourcesOperations: + """DataSourcesOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~search_service_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create_or_update( + self, + data_source_name: str, + data_source: "models.DataSource", + request_options: Optional["models.RequestOptions"] = None, + access_condition: Optional["models.AccessCondition"] = None, + **kwargs + ) -> "models.DataSource": + """Creates a new datasource or updates a datasource if it already exists. + + :param data_source_name: The name of the datasource to create or update. + :type data_source_name: str + :param data_source: The definition of the datasource to create or update. + :type data_source: ~search_service_client.models.DataSource + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. 
+ :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DataSource or the result of cls(response) + :rtype: ~search_service_client.models.DataSource + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + prefer = "return=representation" + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create_or_update.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['Prefer'] = self._serialize.header("prefer", prefer,
'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(data_source, 'DataSource') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('DataSource', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('DataSource', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create_or_update.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + + async def delete( + self, + data_source_name: str, + request_options: Optional["models.RequestOptions"] = None, + access_condition: Optional["models.AccessCondition"] = None, + **kwargs + ) -> None: + """Deletes a datasource. + + :param data_source_name: The name of the datasource to delete. + :type data_source_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. 
+ :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [204, 404]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + + async def get( + self, + data_source_name: str, + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.DataSource": + """Retrieves a datasource definition. + + :param data_source_name: The name of the datasource to retrieve. + :type data_source_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DataSource or the result of cls(response) + :rtype: ~search_service_client.models.DataSource + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if 
_x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DataSource', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + + async def list( + self, + select: Optional[str] = None, + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.ListDataSourcesResult": + """Lists all datasources available for a search service. + + :param select: Selects which top-level properties of the data sources to retrieve. Specified as + a comma-separated list of JSON property names, or '*' for all properties. The default is all + properties. + :type select: str + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListDataSourcesResult or the result of cls(response) + :rtype: ~search_service_client.models.ListDataSourcesResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ListDataSourcesResult"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + 
deserialized = self._deserialize('ListDataSourcesResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/datasources'} + + async def create( + self, + data_source: "models.DataSource", + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.DataSource": + """Creates a new datasource. + + :param data_source: The definition of the datasource to create. + :type data_source: ~search_service_client.models.DataSource + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DataSource or the result of cls(response) + :rtype: ~search_service_client.models.DataSource + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 
kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(data_source, 'DataSource') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DataSource', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create.metadata = {'url': '/datasources'} diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py new file mode 100644 index 0000000000000..acd91482da732 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py @@ -0,0 +1,560 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class IndexersOperations: + """IndexersOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~search_service_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def reset( + self, + indexer_name: str, + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> None: + """Resets the change tracking state associated with an indexer. + + :param indexer_name: The name of the indexer to reset. + :type indexer_name: str + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.reset.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + reset.metadata = {'url': '/indexers(\'{indexerName}\')/search.reset'} + + async def run( + self, + indexer_name: 
str, + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> None: + """Runs an indexer on-demand. + + :param indexer_name: The name of the indexer to run. + :type indexer_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.run.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + run.metadata = {'url': '/indexers(\'{indexerName}\')/search.run'} + + async def create_or_update( + self, + indexer_name: str, + indexer: "models.Indexer", + request_options: Optional["models.RequestOptions"] = None, + access_condition: Optional["models.AccessCondition"] = None, + **kwargs + ) -> "models.Indexer": + """Creates a new indexer or updates an indexer if it already exists. + + :param indexer_name: The name of the indexer to create or update. + :type indexer_name: str + :param indexer: The definition of the indexer to create or update. + :type indexer: ~search_service_client.models.Indexer + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. + :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Indexer or the result of cls(response) + :rtype: ~search_service_client.models.Indexer or ~search_service_client.models.Indexer + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + prefer = "return=representation" + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create_or_update.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 
'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(indexer, 'Indexer') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('Indexer', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('Indexer', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + 
return deserialized + create_or_update.metadata = {'url': '/indexers(\'{indexerName}\')'} + + async def delete( + self, + indexer_name: str, + request_options: Optional["models.RequestOptions"] = None, + access_condition: Optional["models.AccessCondition"] = None, + **kwargs + ) -> None: + """Deletes an indexer. + + :param indexer_name: The name of the indexer to delete. + :type indexer_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. + :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = 
self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/indexers(\'{indexerName}\')'} + + async def get( + self, + indexer_name: str, + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.Indexer": + """Retrieves an indexer definition. + + :param indexer_name: The name of the indexer to retrieve. + :type indexer_name: str + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Indexer or the result of cls(response) + :rtype: ~search_service_client.models.Indexer + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('Indexer', pipeline_response) + + if cls: 
+ return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/indexers(\'{indexerName}\')'} + + async def list( + self, + select: Optional[str] = None, + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.ListIndexersResult": + """Lists all indexers available for a search service. + + :param select: Selects which top-level properties of the indexers to retrieve. Specified as a + comma-separated list of JSON property names, or '*' for all properties. The default is all + properties. + :type select: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListIndexersResult or the result of cls(response) + :rtype: ~search_service_client.models.ListIndexersResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ListIndexersResult"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = 
self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ListIndexersResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/indexers'} + + async def create( + self, + indexer: "models.Indexer", + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.Indexer": + """Creates a new indexer. + + :param indexer: The definition of the indexer to create. + :type indexer: ~search_service_client.models.Indexer + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Indexer or the result of cls(response) + :rtype: ~search_service_client.models.Indexer + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(indexer, 'Indexer') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) 
+ error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('Indexer', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create.metadata = {'url': '/indexers'} + + async def get_status( + self, + indexer_name: str, + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.IndexerExecutionInfo": + """Returns the current status and execution history of an indexer. + + :param indexer_name: The name of the indexer for which to retrieve status. + :type indexer_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IndexerExecutionInfo or the result of cls(response) + :rtype: ~search_service_client.models.IndexerExecutionInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.IndexerExecutionInfo"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.get_status.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + 
header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('IndexerExecutionInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_status.metadata = {'url': '/indexers(\'{indexerName}\')/search.status'} diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py new file mode 100644 index 0000000000000..3a2955b842768 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py @@ -0,0 +1,524 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class IndexesOperations: + """IndexesOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~search_service_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + index: "models.Index", + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.Index": + """Creates a new search index. + + :param index: The definition of the index to create. + :type index: ~search_service_client.models.Index + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Index or the result of cls(response) + :rtype: ~search_service_client.models.Index + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(index, 'Index') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
    async def list(
        self,
        select: Optional[str] = None,
        request_options: Optional["models.RequestOptions"] = None,
        **kwargs
    ) -> "models.ListIndexesResult":
        """Lists all indexes available for a search service.

        :param select: Selects which top-level properties of the index definitions to retrieve.
         Specified as a comma-separated list of JSON property names, or '*' for all properties. The
         default is all properties.
        :type select: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ListIndexesResult or the result of cls(response)
        :rtype: ~search_service_client.models.ListIndexesResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ListIndexesResult"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the RequestOptions parameter group into its single header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.list.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, 'str')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ListIndexesResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list.metadata = {'url': '/indexes'}

    async def create_or_update(
        self,
        index_name: str,
        index: "models.Index",
        allow_index_downtime: Optional[bool] = None,
        request_options: Optional["models.RequestOptions"] = None,
        access_condition: Optional["models.AccessCondition"] = None,
        **kwargs
    ) -> "models.Index":
        """Creates a new search index or updates an index if it already exists.

        :param index_name: The name of the index to create or update.
        :type index_name: str
        :param index: The definition of the index to create or update.
        :type index: ~search_service_client.models.Index
        :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters
         to be added to an index by taking the index offline for at least a few seconds. This
         temporarily causes indexing and query requests to fail. Performance and write availability of
         the index can be impaired for several minutes after the index is updated, or longer for very
         large indexes.
        :type allow_index_downtime: bool
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :param access_condition: Parameter group.
        :type access_condition: ~search_service_client.models.AccessCondition
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Index or the result of cls(response)
        :rtype: ~search_service_client.models.Index
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Index"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the AccessCondition / RequestOptions parameter groups into header values.
        _x_ms_client_request_id = None
        _if_match = None
        _if_none_match = None
        if access_condition is not None:
            _if_match = access_condition.if_match
            _if_none_match = access_condition.if_none_match
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        # Ask the service to echo back the created/updated resource representation.
        prefer = "return=representation"
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'indexName': self._serialize.url("index_name", index_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if allow_index_downtime is not None:
            query_parameters['allowIndexDowntime'] = self._serialize.query("allow_index_downtime", allow_index_downtime, 'bool')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        if _if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
        if _if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
        header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str')
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(index, 'Index')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing index, 201 = created a new one; both carry an Index body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Index', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Index', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/indexes(\'{indexName}\')'}

    async def delete(
        self,
        index_name: str,
        request_options: Optional["models.RequestOptions"] = None,
        access_condition: Optional["models.AccessCondition"] = None,
        **kwargs
    ) -> None:
        """Deletes a search index and all the documents it contains.

        :param index_name: The name of the index to delete.
        :type index_name: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :param access_condition: Parameter group.
        :type access_condition: ~search_service_client.models.AccessCondition
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the AccessCondition / RequestOptions parameter groups into header values.
        _x_ms_client_request_id = None
        _if_match = None
        _if_none_match = None
        if access_condition is not None:
            _if_match = access_condition.if_match
            _if_none_match = access_condition.if_none_match
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'indexName': self._serialize.url("index_name", index_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        if _if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
        if _if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # NOTE: 404 is accepted alongside 204, so deleting an already-missing index is a
        # no-op rather than an error (idempotent delete).
        if response.status_code not in [204, 404]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/indexes(\'{indexName}\')'}

    async def get(
        self,
        index_name: str,
        request_options: Optional["models.RequestOptions"] = None,
        **kwargs
    ) -> "models.Index":
        """Retrieves an index definition.

        :param index_name: The name of the index to retrieve.
        :type index_name: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Index or the result of cls(response)
        :rtype: ~search_service_client.models.Index
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Index"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the RequestOptions parameter group into its single header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'indexName': self._serialize.url("index_name", index_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('Index', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/indexes(\'{indexName}\')'}

    async def get_statistics(
        self,
        index_name: str,
        request_options: Optional["models.RequestOptions"] = None,
        **kwargs
    ) -> "models.GetIndexStatisticsResult":
        """Returns statistics for the given index, including a document count and storage usage.

        :param index_name: The name of the index for which to retrieve statistics.
        :type index_name: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GetIndexStatisticsResult or the result of cls(response)
        :rtype: ~search_service_client.models.GetIndexStatisticsResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.GetIndexStatisticsResult"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the RequestOptions parameter group into its single header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.get_statistics.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'indexName': self._serialize.url("index_name", index_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('GetIndexStatisticsResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_statistics.metadata = {'url': '/indexes(\'{indexName}\')/search.stats'}

    async def analyze(
        self,
        index_name: str,
        request_todo: "models.AnalyzeRequest",
        request_options: Optional["models.RequestOptions"] = None,
        **kwargs
    ) -> "models.AnalyzeResult":
        """Shows how an analyzer breaks text into tokens.

        :param index_name: The name of the index for which to test an analyzer.
        :type index_name: str
        :param request_todo: The text and analyzer or analysis components to test.
        :type request_todo: ~search_service_client.models.AnalyzeRequest
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AnalyzeResult or the result of cls(response)
        :rtype: ~search_service_client.models.AnalyzeResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): ``request_todo`` is an odd generated parameter name (presumably a
        # rename to avoid shadowing ``request``) — confirm against the swagger/codegen
        # config before hand-editing; renaming it would break keyword-argument callers.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.AnalyzeResult"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the RequestOptions parameter group into its single header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.analyze.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'indexName': self._serialize.url("index_name", index_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(request_todo, 'AnalyzeRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('AnalyzeResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    analyze.metadata = {'url': '/indexes(\'{indexName}\')/search.analyze'}
class SearchServiceClientOperationsMixin:
    # Mixin mixed into the generated SearchServiceClient; relies on the host client
    # providing self._client, self._config, self._serialize and self._deserialize.

    async def get_service_statistics(
        self,
        request_options: Optional["models.RequestOptions"] = None,
        **kwargs
    ) -> "models.ServiceStatistics":
        """Gets service level statistics for a search service.

        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServiceStatistics or the result of cls(response)
        :rtype: ~search_service_client.models.ServiceStatistics
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ServiceStatistics"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the RequestOptions parameter group into its single header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.get_service_statistics.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ServiceStatistics', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_service_statistics.metadata = {'url': '/servicestats'}
class SkillsetsOperations:
    """SkillsetsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~search_service_client.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Class-level alias so callers can reach the model types via the operation group.
    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def create_or_update(
        self,
        skillset_name: str,
        skillset: "models.Skillset",
        request_options: Optional["models.RequestOptions"] = None,
        access_condition: Optional["models.AccessCondition"] = None,
        **kwargs
    ) -> "models.Skillset":
        """Creates a new skillset in a search service or updates the skillset if it already exists.

        :param skillset_name: The name of the skillset to create or update.
        :type skillset_name: str
        :param skillset: The skillset containing one or more skills to create or update in a search
         service.
        :type skillset: ~search_service_client.models.Skillset
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :param access_condition: Parameter group.
        :type access_condition: ~search_service_client.models.AccessCondition
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Skillset or the result of cls(response)
        :rtype: ~search_service_client.models.Skillset
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Skillset"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the AccessCondition / RequestOptions parameter groups into header values.
        _x_ms_client_request_id = None
        _if_match = None
        _if_none_match = None
        if access_condition is not None:
            _if_match = access_condition.if_match
            _if_none_match = access_condition.if_none_match
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        # Ask the service to echo back the created/updated resource representation.
        prefer = "return=representation"
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        if _if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
        if _if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
        header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str')
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(skillset, 'Skillset')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing skillset, 201 = created a new one; both carry a Skillset body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Skillset', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Skillset', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/skillsets(\'{skillsetName}\')'}

    async def delete(
        self,
        skillset_name: str,
        request_options: Optional["models.RequestOptions"] = None,
        access_condition: Optional["models.AccessCondition"] = None,
        **kwargs
    ) -> None:
        """Deletes a skillset in a search service.

        :param skillset_name: The name of the skillset to delete.
        :type skillset_name: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :param access_condition: Parameter group.
        :type access_condition: ~search_service_client.models.AccessCondition
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the AccessCondition / RequestOptions parameter groups into header values.
        _x_ms_client_request_id = None
        _if_match = None
        _if_none_match = None
        if access_condition is not None:
            _if_match = access_condition.if_match
            _if_none_match = access_condition.if_none_match
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        if _if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
        if _if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # NOTE: 404 is accepted alongside 204, so deleting an already-missing skillset is a
        # no-op rather than an error (idempotent delete).
        if response.status_code not in [204, 404]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/skillsets(\'{skillsetName}\')'}

    async def get(
        self,
        skillset_name: str,
        request_options: Optional["models.RequestOptions"] = None,
        **kwargs
    ) -> "models.Skillset":
        """Retrieves a skillset in a search service.

        :param skillset_name: The name of the skillset to retrieve.
        :type skillset_name: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Skillset or the result of cls(response)
        :rtype: ~search_service_client.models.Skillset
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Skillset"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the RequestOptions parameter group into its single header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('Skillset', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/skillsets(\'{skillsetName}\')'}

    async def list(
        self,
        select: Optional[str] = None,
        request_options: Optional["models.RequestOptions"] = None,
        **kwargs
    ) -> "models.ListSkillsetsResult":
        """List all skillsets in a search service.

        :param select: Selects which top-level properties of the skillsets to retrieve. Specified as a
         comma-separated list of JSON property names, or '*' for all properties. The default is all
         properties.
        :type select: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ListSkillsetsResult or the result of cls(response)
        :rtype: ~search_service_client.models.ListSkillsetsResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ListSkillsetsResult"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the RequestOptions parameter group into its single header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.list.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, 'str')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ListSkillsetsResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list.metadata = {'url': '/skillsets'}

    async def create(
        self,
        skillset: "models.Skillset",
        request_options: Optional["models.RequestOptions"] = None,
        **kwargs
    ) -> "models.Skillset":
        """Creates a new skillset in a search service.

        :param skillset: The skillset containing one or more skills to create in a search service.
        :type skillset: ~search_service_client.models.Skillset
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Skillset or the result of cls(response)
        :rtype: ~search_service_client.models.Skillset
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Skillset"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the RequestOptions parameter group into its single header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.create.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(skillset, 'Skillset')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Create (POST) only ever returns 201; use create_or_update for upsert semantics.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('Skillset', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create.metadata = {'url': '/skillsets'}
+# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class SynonymMapsOperations: + """SynonymMapsOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~search_service_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create_or_update( + self, + synonym_map_name: str, + synonym_map: "models.SynonymMap", + request_options: Optional["models.RequestOptions"] = None, + access_condition: Optional["models.AccessCondition"] = None, + **kwargs + ) -> "models.SynonymMap": + """Creates a new synonym map or updates a synonym map if it already exists. + + :param synonym_map_name: The name of the synonym map to create or update. + :type synonym_map_name: str + :param synonym_map: The definition of the synonym map to create or update. + :type synonym_map: ~search_service_client.models.SynonymMap + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. + :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SynonymMap or the result of cls(response) + :rtype: ~search_service_client.models.SynonymMap + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + prefer = "return=representation" + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create_or_update.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = 
self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(synonym_map, 'SynonymMap') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SynonymMap', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('SynonymMap', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create_or_update.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + + async def delete( + self, + synonym_map_name: str, + request_options: Optional["models.RequestOptions"] = None, + access_condition: Optional["models.AccessCondition"] = None, + **kwargs + ) -> None: + """Deletes a synonym map. + + :param synonym_map_name: The name of the synonym map to delete. + :type synonym_map_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. 
+ :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [204, 404]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + + async def get( + self, + synonym_map_name: str, + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.SynonymMap": + """Retrieves a synonym map definition. + + :param synonym_map_name: The name of the synonym map to retrieve. + :type synonym_map_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SynonymMap or the result of cls(response) + :rtype: ~search_service_client.models.SynonymMap + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if 
_x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('SynonymMap', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + + async def list( + self, + select: Optional[str] = None, + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.ListSynonymMapsResult": + """Lists all synonym maps available for a search service. + + :param select: Selects which top-level properties of the synonym maps to retrieve. Specified as + a comma-separated list of JSON property names, or '*' for all properties. The default is all + properties. + :type select: str + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListSynonymMapsResult or the result of cls(response) + :rtype: ~search_service_client.models.ListSynonymMapsResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ListSynonymMapsResult"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + 
deserialized = self._deserialize('ListSynonymMapsResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/synonymmaps'} + + async def create( + self, + synonym_map: "models.SynonymMap", + request_options: Optional["models.RequestOptions"] = None, + **kwargs + ) -> "models.SynonymMap": + """Creates a new synonym map. + + :param synonym_map: The definition of the synonym map to create. + :type synonym_map: ~search_service_client.models.SynonymMap + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SynonymMap or the result of cls(response) + :rtype: ~search_service_client.models.SynonymMap + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 
kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(synonym_map, 'SynonymMap') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('SynonymMap', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create.metadata = {'url': '/synonymmaps'} diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py new file mode 100644 index 0000000000000..9e8de4e9d799d --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py @@ -0,0 +1,430 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AccessCondition + from ._models_py3 import AnalyzeRequest + from ._models_py3 import AnalyzeResult + from ._models_py3 import Analyzer + from ._models_py3 import AsciiFoldingTokenFilter + from ._models_py3 import AzureActiveDirectoryApplicationCredentials + from ._models_py3 import CharFilter + from ._models_py3 import CjkBigramTokenFilter + from ._models_py3 import ClassicTokenizer + from ._models_py3 import CognitiveServicesAccount + from ._models_py3 import CognitiveServicesAccountKey + from ._models_py3 import CommonGramTokenFilter + from ._models_py3 import ConditionalSkill + from ._models_py3 import CorsOptions + from ._models_py3 import CustomAnalyzer + from ._models_py3 import DataChangeDetectionPolicy + from ._models_py3 import DataContainer + from ._models_py3 import DataDeletionDetectionPolicy + from ._models_py3 import DataSource + from ._models_py3 import DataSourceCredentials + from ._models_py3 import DefaultCognitiveServicesAccount + from ._models_py3 import DictionaryDecompounderTokenFilter + from ._models_py3 import DistanceScoringFunction + from ._models_py3 import DistanceScoringParameters + from ._models_py3 import EdgeNGramTokenFilter + from ._models_py3 import EdgeNGramTokenFilterV2 + from ._models_py3 import EdgeNGramTokenizer + from ._models_py3 import ElisionTokenFilter + from ._models_py3 import EncryptionKey + from ._models_py3 import EntityRecognitionSkill + from ._models_py3 import Field + from ._models_py3 import FieldMapping + from ._models_py3 import FieldMappingFunction + from ._models_py3 import FreshnessScoringFunction + from ._models_py3 import FreshnessScoringParameters + from ._models_py3 import GetIndexStatisticsResult + from ._models_py3 import HighWaterMarkChangeDetectionPolicy + from ._models_py3 import ImageAnalysisSkill + from ._models_py3 import Index + from ._models_py3 import Indexer + from ._models_py3 
import IndexerExecutionInfo + from ._models_py3 import IndexerExecutionResult + from ._models_py3 import IndexerLimits + from ._models_py3 import IndexingParameters + from ._models_py3 import IndexingSchedule + from ._models_py3 import InputFieldMappingEntry + from ._models_py3 import ItemError + from ._models_py3 import ItemWarning + from ._models_py3 import KeepTokenFilter + from ._models_py3 import KeyPhraseExtractionSkill + from ._models_py3 import KeywordMarkerTokenFilter + from ._models_py3 import KeywordTokenizer + from ._models_py3 import KeywordTokenizerV2 + from ._models_py3 import LanguageDetectionSkill + from ._models_py3 import LengthTokenFilter + from ._models_py3 import LimitTokenFilter + from ._models_py3 import ListDataSourcesResult + from ._models_py3 import ListIndexersResult + from ._models_py3 import ListIndexesResult + from ._models_py3 import ListSkillsetsResult + from ._models_py3 import ListSynonymMapsResult + from ._models_py3 import MagnitudeScoringFunction + from ._models_py3 import MagnitudeScoringParameters + from ._models_py3 import MappingCharFilter + from ._models_py3 import MergeSkill + from ._models_py3 import MicrosoftLanguageStemmingTokenizer + from ._models_py3 import MicrosoftLanguageTokenizer + from ._models_py3 import NGramTokenFilter + from ._models_py3 import NGramTokenFilterV2 + from ._models_py3 import NGramTokenizer + from ._models_py3 import OcrSkill + from ._models_py3 import OutputFieldMappingEntry + from ._models_py3 import PathHierarchyTokenizerV2 + from ._models_py3 import PatternAnalyzer + from ._models_py3 import PatternCaptureTokenFilter + from ._models_py3 import PatternReplaceCharFilter + from ._models_py3 import PatternReplaceTokenFilter + from ._models_py3 import PatternTokenizer + from ._models_py3 import PhoneticTokenFilter + from ._models_py3 import RequestOptions + from ._models_py3 import ResourceCounter + from ._models_py3 import ScoringFunction + from ._models_py3 import ScoringProfile + from 
._models_py3 import SearchError + from ._models_py3 import SentimentSkill + from ._models_py3 import ServiceCounters + from ._models_py3 import ServiceLimits + from ._models_py3 import ServiceStatistics + from ._models_py3 import ShaperSkill + from ._models_py3 import ShingleTokenFilter + from ._models_py3 import Skill + from ._models_py3 import Skillset + from ._models_py3 import SnowballTokenFilter + from ._models_py3 import SoftDeleteColumnDeletionDetectionPolicy + from ._models_py3 import SplitSkill + from ._models_py3 import SqlIntegratedChangeTrackingPolicy + from ._models_py3 import StandardAnalyzer + from ._models_py3 import StandardTokenizer + from ._models_py3 import StandardTokenizerV2 + from ._models_py3 import StemmerOverrideTokenFilter + from ._models_py3 import StemmerTokenFilter + from ._models_py3 import StopAnalyzer + from ._models_py3 import StopwordsTokenFilter + from ._models_py3 import Suggester + from ._models_py3 import SynonymMap + from ._models_py3 import SynonymTokenFilter + from ._models_py3 import TagScoringFunction + from ._models_py3 import TagScoringParameters + from ._models_py3 import TextTranslationSkill + from ._models_py3 import TextWeights + from ._models_py3 import TokenFilter + from ._models_py3 import TokenInfo + from ._models_py3 import Tokenizer + from ._models_py3 import TruncateTokenFilter + from ._models_py3 import UaxUrlEmailTokenizer + from ._models_py3 import UniqueTokenFilter + from ._models_py3 import WebApiSkill + from ._models_py3 import WordDelimiterTokenFilter +except (SyntaxError, ImportError): + from ._models import AccessCondition # type: ignore + from ._models import AnalyzeRequest # type: ignore + from ._models import AnalyzeResult # type: ignore + from ._models import Analyzer # type: ignore + from ._models import AsciiFoldingTokenFilter # type: ignore + from ._models import AzureActiveDirectoryApplicationCredentials # type: ignore + from ._models import CharFilter # type: ignore + from ._models import 
CjkBigramTokenFilter # type: ignore + from ._models import ClassicTokenizer # type: ignore + from ._models import CognitiveServicesAccount # type: ignore + from ._models import CognitiveServicesAccountKey # type: ignore + from ._models import CommonGramTokenFilter # type: ignore + from ._models import ConditionalSkill # type: ignore + from ._models import CorsOptions # type: ignore + from ._models import CustomAnalyzer # type: ignore + from ._models import DataChangeDetectionPolicy # type: ignore + from ._models import DataContainer # type: ignore + from ._models import DataDeletionDetectionPolicy # type: ignore + from ._models import DataSource # type: ignore + from ._models import DataSourceCredentials # type: ignore + from ._models import DefaultCognitiveServicesAccount # type: ignore + from ._models import DictionaryDecompounderTokenFilter # type: ignore + from ._models import DistanceScoringFunction # type: ignore + from ._models import DistanceScoringParameters # type: ignore + from ._models import EdgeNGramTokenFilter # type: ignore + from ._models import EdgeNGramTokenFilterV2 # type: ignore + from ._models import EdgeNGramTokenizer # type: ignore + from ._models import ElisionTokenFilter # type: ignore + from ._models import EncryptionKey # type: ignore + from ._models import EntityRecognitionSkill # type: ignore + from ._models import Field # type: ignore + from ._models import FieldMapping # type: ignore + from ._models import FieldMappingFunction # type: ignore + from ._models import FreshnessScoringFunction # type: ignore + from ._models import FreshnessScoringParameters # type: ignore + from ._models import GetIndexStatisticsResult # type: ignore + from ._models import HighWaterMarkChangeDetectionPolicy # type: ignore + from ._models import ImageAnalysisSkill # type: ignore + from ._models import Index # type: ignore + from ._models import Indexer # type: ignore + from ._models import IndexerExecutionInfo # type: ignore + from ._models import 
IndexerExecutionResult # type: ignore + from ._models import IndexerLimits # type: ignore + from ._models import IndexingParameters # type: ignore + from ._models import IndexingSchedule # type: ignore + from ._models import InputFieldMappingEntry # type: ignore + from ._models import ItemError # type: ignore + from ._models import ItemWarning # type: ignore + from ._models import KeepTokenFilter # type: ignore + from ._models import KeyPhraseExtractionSkill # type: ignore + from ._models import KeywordMarkerTokenFilter # type: ignore + from ._models import KeywordTokenizer # type: ignore + from ._models import KeywordTokenizerV2 # type: ignore + from ._models import LanguageDetectionSkill # type: ignore + from ._models import LengthTokenFilter # type: ignore + from ._models import LimitTokenFilter # type: ignore + from ._models import ListDataSourcesResult # type: ignore + from ._models import ListIndexersResult # type: ignore + from ._models import ListIndexesResult # type: ignore + from ._models import ListSkillsetsResult # type: ignore + from ._models import ListSynonymMapsResult # type: ignore + from ._models import MagnitudeScoringFunction # type: ignore + from ._models import MagnitudeScoringParameters # type: ignore + from ._models import MappingCharFilter # type: ignore + from ._models import MergeSkill # type: ignore + from ._models import MicrosoftLanguageStemmingTokenizer # type: ignore + from ._models import MicrosoftLanguageTokenizer # type: ignore + from ._models import NGramTokenFilter # type: ignore + from ._models import NGramTokenFilterV2 # type: ignore + from ._models import NGramTokenizer # type: ignore + from ._models import OcrSkill # type: ignore + from ._models import OutputFieldMappingEntry # type: ignore + from ._models import PathHierarchyTokenizerV2 # type: ignore + from ._models import PatternAnalyzer # type: ignore + from ._models import PatternCaptureTokenFilter # type: ignore + from ._models import PatternReplaceCharFilter # type: 
ignore + from ._models import PatternReplaceTokenFilter # type: ignore + from ._models import PatternTokenizer # type: ignore + from ._models import PhoneticTokenFilter # type: ignore + from ._models import RequestOptions # type: ignore + from ._models import ResourceCounter # type: ignore + from ._models import ScoringFunction # type: ignore + from ._models import ScoringProfile # type: ignore + from ._models import SearchError # type: ignore + from ._models import SentimentSkill # type: ignore + from ._models import ServiceCounters # type: ignore + from ._models import ServiceLimits # type: ignore + from ._models import ServiceStatistics # type: ignore + from ._models import ShaperSkill # type: ignore + from ._models import ShingleTokenFilter # type: ignore + from ._models import Skill # type: ignore + from ._models import Skillset # type: ignore + from ._models import SnowballTokenFilter # type: ignore + from ._models import SoftDeleteColumnDeletionDetectionPolicy # type: ignore + from ._models import SplitSkill # type: ignore + from ._models import SqlIntegratedChangeTrackingPolicy # type: ignore + from ._models import StandardAnalyzer # type: ignore + from ._models import StandardTokenizer # type: ignore + from ._models import StandardTokenizerV2 # type: ignore + from ._models import StemmerOverrideTokenFilter # type: ignore + from ._models import StemmerTokenFilter # type: ignore + from ._models import StopAnalyzer # type: ignore + from ._models import StopwordsTokenFilter # type: ignore + from ._models import Suggester # type: ignore + from ._models import SynonymMap # type: ignore + from ._models import SynonymTokenFilter # type: ignore + from ._models import TagScoringFunction # type: ignore + from ._models import TagScoringParameters # type: ignore + from ._models import TextTranslationSkill # type: ignore + from ._models import TextWeights # type: ignore + from ._models import TokenFilter # type: ignore + from ._models import TokenInfo # type: ignore + 
from ._models import Tokenizer # type: ignore + from ._models import TruncateTokenFilter # type: ignore + from ._models import UaxUrlEmailTokenizer # type: ignore + from ._models import UniqueTokenFilter # type: ignore + from ._models import WebApiSkill # type: ignore + from ._models import WordDelimiterTokenFilter # type: ignore + +from ._search_service_client_enums import ( + AnalyzerName, + CjkBigramTokenFilterScripts, + DataSourceType, + DataType, + EdgeNGramTokenFilterSide, + EntityCategory, + EntityRecognitionSkillLanguage, + ImageAnalysisSkillLanguage, + ImageDetail, + IndexerExecutionStatus, + IndexerStatus, + KeyPhraseExtractionSkillLanguage, + MicrosoftStemmingTokenizerLanguage, + MicrosoftTokenizerLanguage, + OcrSkillLanguage, + PhoneticEncoder, + RegexFlags, + ScoringFunctionAggregation, + ScoringFunctionInterpolation, + SentimentSkillLanguage, + SnowballTokenFilterLanguage, + SplitSkillLanguage, + StemmerTokenFilterLanguage, + StopwordsList, + TextExtractionAlgorithm, + TextSplitMode, + TextTranslationSkillLanguage, + TokenCharacterKind, + TokenFilterName, + TokenizerName, + VisualFeature, +) + +__all__ = [ + 'AccessCondition', + 'AnalyzeRequest', + 'AnalyzeResult', + 'Analyzer', + 'AsciiFoldingTokenFilter', + 'AzureActiveDirectoryApplicationCredentials', + 'CharFilter', + 'CjkBigramTokenFilter', + 'ClassicTokenizer', + 'CognitiveServicesAccount', + 'CognitiveServicesAccountKey', + 'CommonGramTokenFilter', + 'ConditionalSkill', + 'CorsOptions', + 'CustomAnalyzer', + 'DataChangeDetectionPolicy', + 'DataContainer', + 'DataDeletionDetectionPolicy', + 'DataSource', + 'DataSourceCredentials', + 'DefaultCognitiveServicesAccount', + 'DictionaryDecompounderTokenFilter', + 'DistanceScoringFunction', + 'DistanceScoringParameters', + 'EdgeNGramTokenFilter', + 'EdgeNGramTokenFilterV2', + 'EdgeNGramTokenizer', + 'ElisionTokenFilter', + 'EncryptionKey', + 'EntityRecognitionSkill', + 'Field', + 'FieldMapping', + 'FieldMappingFunction', + 'FreshnessScoringFunction', + 
'FreshnessScoringParameters', + 'GetIndexStatisticsResult', + 'HighWaterMarkChangeDetectionPolicy', + 'ImageAnalysisSkill', + 'Index', + 'Indexer', + 'IndexerExecutionInfo', + 'IndexerExecutionResult', + 'IndexerLimits', + 'IndexingParameters', + 'IndexingSchedule', + 'InputFieldMappingEntry', + 'ItemError', + 'ItemWarning', + 'KeepTokenFilter', + 'KeyPhraseExtractionSkill', + 'KeywordMarkerTokenFilter', + 'KeywordTokenizer', + 'KeywordTokenizerV2', + 'LanguageDetectionSkill', + 'LengthTokenFilter', + 'LimitTokenFilter', + 'ListDataSourcesResult', + 'ListIndexersResult', + 'ListIndexesResult', + 'ListSkillsetsResult', + 'ListSynonymMapsResult', + 'MagnitudeScoringFunction', + 'MagnitudeScoringParameters', + 'MappingCharFilter', + 'MergeSkill', + 'MicrosoftLanguageStemmingTokenizer', + 'MicrosoftLanguageTokenizer', + 'NGramTokenFilter', + 'NGramTokenFilterV2', + 'NGramTokenizer', + 'OcrSkill', + 'OutputFieldMappingEntry', + 'PathHierarchyTokenizerV2', + 'PatternAnalyzer', + 'PatternCaptureTokenFilter', + 'PatternReplaceCharFilter', + 'PatternReplaceTokenFilter', + 'PatternTokenizer', + 'PhoneticTokenFilter', + 'RequestOptions', + 'ResourceCounter', + 'ScoringFunction', + 'ScoringProfile', + 'SearchError', + 'SentimentSkill', + 'ServiceCounters', + 'ServiceLimits', + 'ServiceStatistics', + 'ShaperSkill', + 'ShingleTokenFilter', + 'Skill', + 'Skillset', + 'SnowballTokenFilter', + 'SoftDeleteColumnDeletionDetectionPolicy', + 'SplitSkill', + 'SqlIntegratedChangeTrackingPolicy', + 'StandardAnalyzer', + 'StandardTokenizer', + 'StandardTokenizerV2', + 'StemmerOverrideTokenFilter', + 'StemmerTokenFilter', + 'StopAnalyzer', + 'StopwordsTokenFilter', + 'Suggester', + 'SynonymMap', + 'SynonymTokenFilter', + 'TagScoringFunction', + 'TagScoringParameters', + 'TextTranslationSkill', + 'TextWeights', + 'TokenFilter', + 'TokenInfo', + 'Tokenizer', + 'TruncateTokenFilter', + 'UaxUrlEmailTokenizer', + 'UniqueTokenFilter', + 'WebApiSkill', + 'WordDelimiterTokenFilter', + 
'AnalyzerName', + 'CjkBigramTokenFilterScripts', + 'DataSourceType', + 'DataType', + 'EdgeNGramTokenFilterSide', + 'EntityCategory', + 'EntityRecognitionSkillLanguage', + 'ImageAnalysisSkillLanguage', + 'ImageDetail', + 'IndexerExecutionStatus', + 'IndexerStatus', + 'KeyPhraseExtractionSkillLanguage', + 'MicrosoftStemmingTokenizerLanguage', + 'MicrosoftTokenizerLanguage', + 'OcrSkillLanguage', + 'PhoneticEncoder', + 'RegexFlags', + 'ScoringFunctionAggregation', + 'ScoringFunctionInterpolation', + 'SentimentSkillLanguage', + 'SnowballTokenFilterLanguage', + 'SplitSkillLanguage', + 'StemmerTokenFilterLanguage', + 'StopwordsList', + 'TextExtractionAlgorithm', + 'TextSplitMode', + 'TextTranslationSkillLanguage', + 'TokenCharacterKind', + 'TokenFilterName', + 'TokenizerName', + 'VisualFeature', +] diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py new file mode 100644 index 0000000000000..bcdd12e57879d --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py @@ -0,0 +1,5186 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + + +class AccessCondition(msrest.serialization.Model): + """Parameter group. + + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. 
The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str + """ + + _attribute_map = { + 'if_match': {'key': 'If-Match', 'type': 'str'}, + 'if_none_match': {'key': 'If-None-Match', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(AccessCondition, self).__init__(**kwargs) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + + +class Analyzer(msrest.serialization.Model): + """Abstract base class for analyzers. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: CustomAnalyzer, PatternAnalyzer, StandardAnalyzer, StopAnalyzer. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer', '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer', '#Microsoft.Azure.Search.StandardAnalyzer': 'StandardAnalyzer', '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer'} + } + + def __init__( + self, + **kwargs + ): + super(Analyzer, self).__init__(**kwargs) + self.odata_type = None + self.name = kwargs.get('name', None) + + +class AnalyzeRequest(msrest.serialization.Model): + """Specifies some text and analysis components used to break that text into tokens. + + All required parameters must be populated in order to send to Azure. + + :param text: Required. 
The text to break into tokens. + :type text: str + :param analyzer: The name of the analyzer to use to break the given text. If this parameter is + not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are + mutually exclusive. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', + 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- + Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', + 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', + 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', + 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', + 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', + 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', + 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', + 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', + 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- + PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', + 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', + 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', + 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', + 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', + 'simple', 'stop', 'whitespace'. + :type analyzer: str or ~search_service_client.models.AnalyzerName + :param tokenizer: The name of the tokenizer to use to break the given text. If this parameter + is not specified, you must specify an analyzer instead. 
The tokenizer and analyzer parameters + are mutually exclusive. Possible values include: 'classic', 'edgeNGram', 'keyword_v2', + 'letter', 'lowercase', 'microsoft_language_tokenizer', 'microsoft_language_stemming_tokenizer', + 'nGram', 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email', 'whitespace'. + :type tokenizer: str or ~search_service_client.models.TokenizerName + :param token_filters: An optional list of token filters to use when breaking the given text. + This parameter can only be set when using the tokenizer parameter. + :type token_filters: list[str or ~search_service_client.models.TokenFilterName] + :param char_filters: An optional list of character filters to use when breaking the given text. + This parameter can only be set when using the tokenizer parameter. + :type char_filters: list[str] + """ + + _validation = { + 'text': {'required': True}, + } + + _attribute_map = { + 'text': {'key': 'text', 'type': 'str'}, + 'analyzer': {'key': 'analyzer', 'type': 'str'}, + 'tokenizer': {'key': 'tokenizer', 'type': 'str'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[str]'}, + 'char_filters': {'key': 'charFilters', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(AnalyzeRequest, self).__init__(**kwargs) + self.text = kwargs.get('text', None) + self.analyzer = kwargs.get('analyzer', None) + self.tokenizer = kwargs.get('tokenizer', None) + self.token_filters = kwargs.get('token_filters', None) + self.char_filters = kwargs.get('char_filters', None) + + +class AnalyzeResult(msrest.serialization.Model): + """The result of testing an analyzer on text. + + All required parameters must be populated in order to send to Azure. + + :param tokens: Required. The list of tokens returned by the analyzer specified in the request. 
+ :type tokens: list[~search_service_client.models.TokenInfo] + """ + + _validation = { + 'tokens': {'required': True}, + } + + _attribute_map = { + 'tokens': {'key': 'tokens', 'type': '[TokenInfo]'}, + } + + def __init__( + self, + **kwargs + ): + super(AnalyzeResult, self).__init__(**kwargs) + self.tokens = kwargs.get('tokens', None) + + +class TokenFilter(msrest.serialization.Model): + """Abstract base class for token filters. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: AsciiFoldingTokenFilter, CjkBigramTokenFilter, CommonGramTokenFilter, DictionaryDecompounderTokenFilter, EdgeNGramTokenFilter, EdgeNGramTokenFilterV2, ElisionTokenFilter, KeepTokenFilter, KeywordMarkerTokenFilter, LengthTokenFilter, LimitTokenFilter, NGramTokenFilter, NGramTokenFilterV2, PatternCaptureTokenFilter, PatternReplaceTokenFilter, PhoneticTokenFilter, ShingleTokenFilter, SnowballTokenFilter, StemmerOverrideTokenFilter, StemmerTokenFilter, StopwordsTokenFilter, SynonymTokenFilter, TruncateTokenFilter, UniqueTokenFilter, WordDelimiterTokenFilter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. 
+ :type name: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.AsciiFoldingTokenFilter': 'AsciiFoldingTokenFilter', '#Microsoft.Azure.Search.CjkBigramTokenFilter': 'CjkBigramTokenFilter', '#Microsoft.Azure.Search.CommonGramTokenFilter': 'CommonGramTokenFilter', '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter': 'DictionaryDecompounderTokenFilter', '#Microsoft.Azure.Search.EdgeNGramTokenFilter': 'EdgeNGramTokenFilter', '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2': 'EdgeNGramTokenFilterV2', '#Microsoft.Azure.Search.ElisionTokenFilter': 'ElisionTokenFilter', '#Microsoft.Azure.Search.KeepTokenFilter': 'KeepTokenFilter', '#Microsoft.Azure.Search.KeywordMarkerTokenFilter': 'KeywordMarkerTokenFilter', '#Microsoft.Azure.Search.LengthTokenFilter': 'LengthTokenFilter', '#Microsoft.Azure.Search.LimitTokenFilter': 'LimitTokenFilter', '#Microsoft.Azure.Search.NGramTokenFilter': 'NGramTokenFilter', '#Microsoft.Azure.Search.NGramTokenFilterV2': 'NGramTokenFilterV2', '#Microsoft.Azure.Search.PatternCaptureTokenFilter': 'PatternCaptureTokenFilter', '#Microsoft.Azure.Search.PatternReplaceTokenFilter': 'PatternReplaceTokenFilter', '#Microsoft.Azure.Search.PhoneticTokenFilter': 'PhoneticTokenFilter', '#Microsoft.Azure.Search.ShingleTokenFilter': 'ShingleTokenFilter', '#Microsoft.Azure.Search.SnowballTokenFilter': 'SnowballTokenFilter', '#Microsoft.Azure.Search.StemmerOverrideTokenFilter': 'StemmerOverrideTokenFilter', '#Microsoft.Azure.Search.StemmerTokenFilter': 'StemmerTokenFilter', '#Microsoft.Azure.Search.StopwordsTokenFilter': 'StopwordsTokenFilter', '#Microsoft.Azure.Search.SynonymTokenFilter': 'SynonymTokenFilter', '#Microsoft.Azure.Search.TruncateTokenFilter': 'TruncateTokenFilter', '#Microsoft.Azure.Search.UniqueTokenFilter': 
'UniqueTokenFilter', '#Microsoft.Azure.Search.WordDelimiterTokenFilter': 'WordDelimiterTokenFilter'} + } + + def __init__( + self, + **kwargs + ): + super(TokenFilter, self).__init__(**kwargs) + self.odata_type = None + self.name = kwargs.get('name', None) + + +class AsciiFoldingTokenFilter(TokenFilter): + """Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param preserve_original: A value indicating whether the original token will be kept. Default + is false. + :type preserve_original: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'preserve_original': {'key': 'preserveOriginal', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(AsciiFoldingTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' + self.preserve_original = kwargs.get('preserve_original', False) + + +class AzureActiveDirectoryApplicationCredentials(msrest.serialization.Model): + """Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault. + + All required parameters must be populated in order to send to Azure. + + :param application_id: Required. 
An AAD Application ID that was granted the required access + permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The + Application ID should not be confused with the Object ID for your AAD Application. + :type application_id: str + :param application_secret: The authentication key of the specified AAD application. + :type application_secret: str + """ + + _validation = { + 'application_id': {'required': True}, + } + + _attribute_map = { + 'application_id': {'key': 'applicationId', 'type': 'str'}, + 'application_secret': {'key': 'applicationSecret', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(AzureActiveDirectoryApplicationCredentials, self).__init__(**kwargs) + self.application_id = kwargs.get('application_id', None) + self.application_secret = kwargs.get('application_secret', None) + + +class CharFilter(msrest.serialization.Model): + """Abstract base class for character filters. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MappingCharFilter, PatternReplaceCharFilter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the char filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. 
+ :type name: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.MappingCharFilter': 'MappingCharFilter', '#Microsoft.Azure.Search.PatternReplaceCharFilter': 'PatternReplaceCharFilter'} + } + + def __init__( + self, + **kwargs + ): + super(CharFilter, self).__init__(**kwargs) + self.odata_type = None + self.name = kwargs.get('name', None) + + +class CjkBigramTokenFilter(TokenFilter): + """Forms bigrams of CJK terms that are generated from StandardTokenizer. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param ignore_scripts: The scripts to ignore. + :type ignore_scripts: list[str or ~search_service_client.models.CjkBigramTokenFilterScripts] + :param output_unigrams: A value indicating whether to output both unigrams and bigrams (if + true), or just bigrams (if false). Default is false. 
+ :type output_unigrams: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'ignore_scripts': {'key': 'ignoreScripts', 'type': '[str]'}, + 'output_unigrams': {'key': 'outputUnigrams', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(CjkBigramTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.CjkBigramTokenFilter' + self.ignore_scripts = kwargs.get('ignore_scripts', None) + self.output_unigrams = kwargs.get('output_unigrams', False) + + +class Tokenizer(msrest.serialization.Model): + """Abstract base class for tokenizers. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, StandardTokenizer, StandardTokenizerV2, UaxUrlEmailTokenizer. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. 
+ :type name: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'StandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'StandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'} + } + + def __init__( + self, + **kwargs + ): + super(Tokenizer, self).__init__(**kwargs) + self.odata_type = None + self.name = kwargs.get('name', None) + + +class ClassicTokenizer(Tokenizer): + """Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. 
The maximum token length that can be used is 300 characters. + :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(ClassicTokenizer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.ClassicTokenizer' + self.max_token_length = kwargs.get('max_token_length', 255) + + +class CognitiveServicesAccount(msrest.serialization.Model): + """Abstract base class for describing any cognitive service resource attached to the skillset. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: CognitiveServicesAccountKey, DefaultCognitiveServicesAccount. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param description: + :type description: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.CognitiveServicesByKey': 'CognitiveServicesAccountKey', '#Microsoft.Azure.Search.DefaultCognitiveServices': 'DefaultCognitiveServicesAccount'} + } + + def __init__( + self, + **kwargs + ): + super(CognitiveServicesAccount, self).__init__(**kwargs) + self.odata_type = None + self.description = kwargs.get('description', None) + + +class CognitiveServicesAccountKey(CognitiveServicesAccount): + """A cognitive service resource provisioned with a key that is attached to a skillset. + + All required parameters must be populated in order to send to Azure. 
+ + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param description: + :type description: str + :param key: Required. + :type key: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'key': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CognitiveServicesAccountKey, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' + self.key = kwargs.get('key', None) + + +class CommonGramTokenFilter(TokenFilter): + """Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param common_words: Required. The set of common words. + :type common_words: list[str] + :param ignore_case: A value indicating whether common words matching will be case insensitive. + Default is false. + :type ignore_case: bool + :param use_query_mode: A value that indicates whether the token filter is in query mode. When + in query mode, the token filter generates bigrams and then removes common words and single + terms followed by a common word. Default is false. 
+ :type use_query_mode: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'common_words': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'common_words': {'key': 'commonWords', 'type': '[str]'}, + 'ignore_case': {'key': 'ignoreCase', 'type': 'bool'}, + 'use_query_mode': {'key': 'queryMode', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(CommonGramTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' + self.common_words = kwargs.get('common_words', None) + self.ignore_case = kwargs.get('ignore_case', False) + self.use_query_mode = kwargs.get('use_query_mode', False) + + +class Skill(msrest.serialization.Model): + """Abstract base class for skills. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: WebApiSkill, EntityRecognitionSkill, KeyPhraseExtractionSkill, LanguageDetectionSkill, MergeSkill, SentimentSkill, SplitSkill, TextTranslationSkill, ConditionalSkill, ShaperSkill, ImageAnalysisSkill, OcrSkill. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. 
Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Skills.Custom.WebApiSkill': 'WebApiSkill', '#Microsoft.Skills.Text.EntityRecognitionSkill': 'EntityRecognitionSkill', '#Microsoft.Skills.Text.KeyPhraseExtractionSkill': 'KeyPhraseExtractionSkill', '#Microsoft.Skills.Text.LanguageDetectionSkill': 'LanguageDetectionSkill', '#Microsoft.Skills.Text.MergeSkill': 'MergeSkill', '#Microsoft.Skills.Text.SentimentSkill': 'SentimentSkill', '#Microsoft.Skills.Text.SplitSkill': 'SplitSkill', '#Microsoft.Skills.Text.TranslationSkill': 'TextTranslationSkill', '#Microsoft.Skills.Util.ConditionalSkill': 'ConditionalSkill', '#Microsoft.Skills.Util.ShaperSkill': 'ShaperSkill', '#Microsoft.Skills.Vision.ImageAnalysisSkill': 'ImageAnalysisSkill', '#Microsoft.Skills.Vision.OcrSkill': 'OcrSkill'} + } + + def __init__( + self, + **kwargs + ): + super(Skill, self).__init__(**kwargs) + self.odata_type = None + self.name = kwargs.get('name', None) + self.description = kwargs.get('description', None) + self.context = kwargs.get('context', None) + self.inputs = kwargs.get('inputs', None) + self.outputs = 
kwargs.get('outputs', None) + + +class ConditionalSkill(Skill): + """A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + } + + def __init__( + self, + **kwargs + ): + super(ConditionalSkill, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Skills.Util.ConditionalSkill' + + +class CorsOptions(msrest.serialization.Model): + """Defines options to control Cross-Origin Resource Sharing (CORS) for an index. + + All required parameters must be populated in order to send to Azure. + + :param allowed_origins: Required. The list of origins from which JavaScript code will be + granted access to your index. Can contain a list of hosts of the form {protocol}://{fully- + qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). + :type allowed_origins: list[str] + :param max_age_in_seconds: The duration for which browsers should cache CORS preflight + responses. Defaults to 5 minutes. + :type max_age_in_seconds: long + """ + + _validation = { + 'allowed_origins': {'required': True}, + } + + _attribute_map = { + 'allowed_origins': {'key': 'allowedOrigins', 'type': '[str]'}, + 'max_age_in_seconds': {'key': 'maxAgeInSeconds', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(CorsOptions, self).__init__(**kwargs) + self.allowed_origins = kwargs.get('allowed_origins', None) + self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) + + +class CustomAnalyzer(Analyzer): + """Allows you to take control over the process of converting text into indexable/searchable tokens. 
It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param tokenizer: Required. The name of the tokenizer to use to divide continuous text into a + sequence of tokens, such as breaking a sentence into words. Possible values include: 'classic', + 'edgeNGram', 'keyword_v2', 'letter', 'lowercase', 'microsoft_language_tokenizer', + 'microsoft_language_stemming_tokenizer', 'nGram', 'path_hierarchy_v2', 'pattern', + 'standard_v2', 'uax_url_email', 'whitespace'. + :type tokenizer: str or ~search_service_client.models.TokenizerName + :param token_filters: A list of token filters used to filter out or modify the tokens generated + by a tokenizer. For example, you can specify a lowercase filter that converts all characters to + lowercase. The filters are run in the order in which they are listed. + :type token_filters: list[str or ~search_service_client.models.TokenFilterName] + :param char_filters: A list of character filters used to prepare input text before it is + processed by the tokenizer. For instance, they can replace certain characters or symbols. The + filters are run in the order in which they are listed. 
+ :type char_filters: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'tokenizer': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'tokenizer': {'key': 'tokenizer', 'type': 'str'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[str]'}, + 'char_filters': {'key': 'charFilters', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(CustomAnalyzer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' + self.tokenizer = kwargs.get('tokenizer', None) + self.token_filters = kwargs.get('token_filters', None) + self.char_filters = kwargs.get('char_filters', None) + + +class DataChangeDetectionPolicy(msrest.serialization.Model): + """Abstract base class for data change detection policies. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: HighWaterMarkChangeDetectionPolicy, SqlIntegratedChangeTrackingPolicy. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy': 'HighWaterMarkChangeDetectionPolicy', '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy': 'SqlIntegratedChangeTrackingPolicy'} + } + + def __init__( + self, + **kwargs + ): + super(DataChangeDetectionPolicy, self).__init__(**kwargs) + self.odata_type = None + + +class DataContainer(msrest.serialization.Model): + """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. + + All required parameters must be populated in order to send to Azure. 
+ + :param name: Required. The name of the table or view (for Azure SQL data source) or collection + (for CosmosDB data source) that will be indexed. + :type name: str + :param query: A query that is applied to this data container. The syntax and meaning of this + parameter is datasource-specific. Not supported by Azure SQL datasources. + :type query: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'query': {'key': 'query', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(DataContainer, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.query = kwargs.get('query', None) + + +class DataDeletionDetectionPolicy(msrest.serialization.Model): + """Abstract base class for data deletion detection policies. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: SoftDeleteColumnDeletionDetectionPolicy. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy': 'SoftDeleteColumnDeletionDetectionPolicy'} + } + + def __init__( + self, + **kwargs + ): + super(DataDeletionDetectionPolicy, self).__init__(**kwargs) + self.odata_type = None + + +class DataSource(msrest.serialization.Model): + """Represents a datasource definition, which can be used to configure an indexer. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the datasource. + :type name: str + :param description: The description of the datasource. + :type description: str + :param type: Required. The type of the datasource. 
Possible values include: 'azuresql', + 'cosmosdb', 'azureblob', 'azuretable', 'mysql'. + :type type: str or ~search_service_client.models.DataSourceType + :param credentials: Required. Credentials for the datasource. + :type credentials: ~search_service_client.models.DataSourceCredentials + :param container: Required. The data container for the datasource. + :type container: ~search_service_client.models.DataContainer + :param data_change_detection_policy: The data change detection policy for the datasource. + :type data_change_detection_policy: ~search_service_client.models.DataChangeDetectionPolicy + :param data_deletion_detection_policy: The data deletion detection policy for the datasource. + :type data_deletion_detection_policy: ~search_service_client.models.DataDeletionDetectionPolicy + :param e_tag: The ETag of the DataSource. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + 'credentials': {'required': True}, + 'container': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'}, + 'container': {'key': 'container', 'type': 'DataContainer'}, + 'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'}, + 'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(DataSource, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.description = kwargs.get('description', None) + self.type = kwargs.get('type', None) + self.credentials = kwargs.get('credentials', None) + self.container = kwargs.get('container', None) + self.data_change_detection_policy = 
kwargs.get('data_change_detection_policy', None) + self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None) + self.e_tag = kwargs.get('e_tag', None) + + +class DataSourceCredentials(msrest.serialization.Model): + """Represents credentials that can be used to connect to a datasource. + + :param connection_string: The connection string for the datasource. + :type connection_string: str + """ + + _attribute_map = { + 'connection_string': {'key': 'connectionString', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(DataSourceCredentials, self).__init__(**kwargs) + self.connection_string = kwargs.get('connection_string', None) + + +class DefaultCognitiveServicesAccount(CognitiveServicesAccount): + """An empty object that represents the default cognitive service resource for a skillset. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param description: + :type description: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(DefaultCognitiveServicesAccount, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.DefaultCognitiveServices' + + +class DictionaryDecompounderTokenFilter(TokenFilter): + """Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. 
+ :type name: str + :param word_list: Required. The list of words to match against. + :type word_list: list[str] + :param min_word_size: The minimum word size. Only words longer than this get processed. Default + is 5. Maximum is 300. + :type min_word_size: int + :param min_subword_size: The minimum subword size. Only subwords longer than this are + outputted. Default is 2. Maximum is 300. + :type min_subword_size: int + :param max_subword_size: The maximum subword size. Only subwords shorter than this are + outputted. Default is 15. Maximum is 300. + :type max_subword_size: int + :param only_longest_match: A value indicating whether to add only the longest matching subword + to the output. Default is false. + :type only_longest_match: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'word_list': {'required': True}, + 'min_word_size': {'maximum': 300}, + 'min_subword_size': {'maximum': 300}, + 'max_subword_size': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'word_list': {'key': 'wordList', 'type': '[str]'}, + 'min_word_size': {'key': 'minWordSize', 'type': 'int'}, + 'min_subword_size': {'key': 'minSubwordSize', 'type': 'int'}, + 'max_subword_size': {'key': 'maxSubwordSize', 'type': 'int'}, + 'only_longest_match': {'key': 'onlyLongestMatch', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(DictionaryDecompounderTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' + self.word_list = kwargs.get('word_list', None) + self.min_word_size = kwargs.get('min_word_size', 5) + self.min_subword_size = kwargs.get('min_subword_size', 2) + self.max_subword_size = kwargs.get('max_subword_size', 15) + self.only_longest_match = kwargs.get('only_longest_match', False) + + +class ScoringFunction(msrest.serialization.Model): + """Abstract base class for 
functions that can modify document scores during ranking. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: DistanceScoringFunction, FreshnessScoringFunction, MagnitudeScoringFunction, TagScoringFunction. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. + :type type: str + :param field_name: Required. The name of the field used as input to the scoring function. + :type field_name: str + :param boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :type boost: float + :param interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', + 'logarithmic'. + :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + """ + + _validation = { + 'type': {'required': True}, + 'field_name': {'required': True}, + 'boost': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'field_name': {'key': 'fieldName', 'type': 'str'}, + 'boost': {'key': 'boost', 'type': 'float'}, + 'interpolation': {'key': 'interpolation', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'distance': 'DistanceScoringFunction', 'freshness': 'FreshnessScoringFunction', 'magnitude': 'MagnitudeScoringFunction', 'tag': 'TagScoringFunction'} + } + + def __init__( + self, + **kwargs + ): + super(ScoringFunction, self).__init__(**kwargs) + self.type = None + self.field_name = kwargs.get('field_name', None) + self.boost = kwargs.get('boost', None) + self.interpolation = kwargs.get('interpolation', None) + + +class DistanceScoringFunction(ScoringFunction): + """Defines a function that boosts scores based on distance from a geographic location. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. 
Constant filled by server. + :type type: str + :param field_name: Required. The name of the field used as input to the scoring function. + :type field_name: str + :param boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :type boost: float + :param interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', + 'logarithmic'. + :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + :param parameters: Required. Parameter values for the distance scoring function. + :type parameters: ~search_service_client.models.DistanceScoringParameters + """ + + _validation = { + 'type': {'required': True}, + 'field_name': {'required': True}, + 'boost': {'required': True}, + 'parameters': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'field_name': {'key': 'fieldName', 'type': 'str'}, + 'boost': {'key': 'boost', 'type': 'float'}, + 'interpolation': {'key': 'interpolation', 'type': 'str'}, + 'parameters': {'key': 'distance', 'type': 'DistanceScoringParameters'}, + } + + def __init__( + self, + **kwargs + ): + super(DistanceScoringFunction, self).__init__(**kwargs) + self.type = 'distance' + self.parameters = kwargs.get('parameters', None) + + +class DistanceScoringParameters(msrest.serialization.Model): + """Provides parameter values to a distance scoring function. + + All required parameters must be populated in order to send to Azure. + + :param reference_point_parameter: Required. The name of the parameter passed in search queries + to specify the reference location. + :type reference_point_parameter: str + :param boosting_distance: Required. The distance in kilometers from the reference location + where the boosting range ends. 
+ :type boosting_distance: float + """ + + _validation = { + 'reference_point_parameter': {'required': True}, + 'boosting_distance': {'required': True}, + } + + _attribute_map = { + 'reference_point_parameter': {'key': 'referencePointParameter', 'type': 'str'}, + 'boosting_distance': {'key': 'boostingDistance', 'type': 'float'}, + } + + def __init__( + self, + **kwargs + ): + super(DistanceScoringParameters, self).__init__(**kwargs) + self.reference_point_parameter = kwargs.get('reference_point_parameter', None) + self.boosting_distance = kwargs.get('boosting_distance', None) + + +class EdgeNGramTokenFilter(TokenFilter): + """Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. + :type max_gram: int + :param side: Specifies which side of the input the n-gram should be generated from. Default is + "front". Possible values include: 'front', 'back'. 
+ :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + 'side': {'key': 'side', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(EdgeNGramTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' + self.min_gram = kwargs.get('min_gram', 1) + self.max_gram = kwargs.get('max_gram', 2) + self.side = kwargs.get('side', None) + + +class EdgeNGramTokenFilterV2(TokenFilter): + """Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :type max_gram: int + :param side: Specifies which side of the input the n-gram should be generated from. Default is + "front". Possible values include: 'front', 'back'. 
+ :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'min_gram': {'maximum': 300}, + 'max_gram': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + 'side': {'key': 'side', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(EdgeNGramTokenFilterV2, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' + self.min_gram = kwargs.get('min_gram', 1) + self.max_gram = kwargs.get('max_gram', 2) + self.side = kwargs.get('side', None) + + +class EdgeNGramTokenizer(Tokenizer): + """Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :type max_gram: int + :param token_chars: Character classes to keep in the tokens. 
+ :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'min_gram': {'maximum': 300}, + 'max_gram': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + 'token_chars': {'key': 'tokenChars', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(EdgeNGramTokenizer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenizer' + self.min_gram = kwargs.get('min_gram', 1) + self.max_gram = kwargs.get('max_gram', 2) + self.token_chars = kwargs.get('token_chars', None) + + +class ElisionTokenFilter(TokenFilter): + """Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param articles: The set of articles to remove. 
+ :type articles: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'articles': {'key': 'articles', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(ElisionTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.ElisionTokenFilter' + self.articles = kwargs.get('articles', None) + + +class EncryptionKey(msrest.serialization.Model): + """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. + + All required parameters must be populated in order to send to Azure. + + :param key_vault_key_name: Required. The name of your Azure Key Vault key to be used to encrypt + your data at rest. + :type key_vault_key_name: str + :param key_vault_key_version: Required. The version of your Azure Key Vault key to be used to + encrypt your data at rest. + :type key_vault_key_version: str + :param key_vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, + that contains the key to be used to encrypt your data at rest. An example URI might be + https://my-keyvault-name.vault.azure.net. + :type key_vault_uri: str + :param access_credentials: Optional Azure Active Directory credentials used for accessing your + Azure Key Vault. Not required if using managed identity instead. 
+ :type access_credentials: + ~search_service_client.models.AzureActiveDirectoryApplicationCredentials + """ + + _validation = { + 'key_vault_key_name': {'required': True}, + 'key_vault_key_version': {'required': True}, + 'key_vault_uri': {'required': True}, + } + + _attribute_map = { + 'key_vault_key_name': {'key': 'keyVaultKeyName', 'type': 'str'}, + 'key_vault_key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'}, + 'key_vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, + 'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'}, + } + + def __init__( + self, + **kwargs + ): + super(EncryptionKey, self).__init__(**kwargs) + self.key_vault_key_name = kwargs.get('key_vault_key_name', None) + self.key_vault_key_version = kwargs.get('key_vault_key_version', None) + self.key_vault_uri = kwargs.get('key_vault_uri', None) + self.access_credentials = kwargs.get('access_credentials', None) + + +class EntityRecognitionSkill(Skill): + """Text analytics entity recognition. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. 
+ :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param categories: A list of entity categories that should be extracted. + :type categories: list[str or ~search_service_client.models.EntityCategory] + :param default_language_code: A value indicating which language code to use. Default is en. + Possible values include: 'ar', 'cs', 'zh-Hans', 'zh-Hant', 'da', 'nl', 'en', 'fi', 'fr', 'de', + 'el', 'hu', 'it', 'ja', 'ko', 'no', 'pl', 'pt-PT', 'pt-BR', 'ru', 'es', 'sv', 'tr'. + :type default_language_code: str or + ~search_service_client.models.EntityRecognitionSkillLanguage + :param include_typeless_entities: Determines whether or not to include entities which are well + known but don't conform to a pre-defined type. If this configuration is not set (default), set + to null or set to false, entities which don't conform to one of the pre-defined types will not + be surfaced. + :type include_typeless_entities: bool + :param minimum_precision: A value between 0 and 1 that can be used to only include entities whose + confidence score is greater than the value specified. If not set (default), or if explicitly + set to null, all entities will be included. 
+ :type minimum_precision: float + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'categories': {'key': 'categories', 'type': '[str]'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + 'include_typeless_entities': {'key': 'includeTypelessEntities', 'type': 'bool'}, + 'minimum_precision': {'key': 'minimumPrecision', 'type': 'float'}, + } + + def __init__( + self, + **kwargs + ): + super(EntityRecognitionSkill, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Skills.Text.EntityRecognitionSkill' + self.categories = kwargs.get('categories', None) + self.default_language_code = kwargs.get('default_language_code', None) + self.include_typeless_entities = kwargs.get('include_typeless_entities', None) + self.minimum_precision = kwargs.get('minimum_precision', None) + + +class Field(msrest.serialization.Model): + """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the field, which must be unique within the fields collection + of the index or parent field. + :type name: str + :param type: Required. The data type of the field. Possible values include: 'Edm.String', + 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', + 'Edm.GeographyPoint', 'Edm.ComplexType'. 
+ :type type: str or ~search_service_client.models.DataType + :param key: A value indicating whether the field uniquely identifies documents in the index. + Exactly one top-level field in each index must be chosen as the key field and it must be of + type Edm.String. Key fields can be used to look up documents directly and update or delete + specific documents. Default is false for simple fields and null for complex fields. + :type key: bool + :param retrievable: A value indicating whether the field can be returned in a search result. + You can disable this option if you want to use a field (for example, margin) as a filter, + sorting, or scoring mechanism but do not want the field to be visible to the end user. This + property must be true for key fields, and it must be null for complex fields. This property can + be changed on existing fields. Enabling this property does not cause any increase in index + storage requirements. Default is true for simple fields and null for complex fields. + :type retrievable: bool + :param searchable: A value indicating whether the field is full-text searchable. This means it + will undergo analysis such as word-breaking during indexing. If you set a searchable field to a + value like "sunny day", internally it will be split into the individual tokens "sunny" and + "day". This enables full-text searches for these terms. Fields of type Edm.String or + Collection(Edm.String) are searchable by default. This property must be false for simple fields + of other non-string data types, and it must be null for complex fields. Note: searchable fields + consume extra space in your index since Azure Cognitive Search will store an additional + tokenized version of the field value for full-text searches. If you want to save space in your + index and you don't need a field to be included in searches, set searchable to false. 
+ :type searchable: bool + :param filterable: A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields of type + Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so + comparisons are for exact matches only. For example, if you set such a field f to "sunny day", + $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property + must be null for complex fields. Default is true for simple fields and null for complex fields. + :type filterable: bool + :param sortable: A value indicating whether to enable the field to be referenced in $orderby + expressions. By default Azure Cognitive Search sorts results by score, but in many experiences + users will want to sort by fields in the documents. A simple field can be sortable only if it + is single-valued (it has a single value in the scope of the parent document). Simple collection + fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex + collections are also multi-valued, and therefore cannot be sortable. This is true whether it's + an immediate parent field, or an ancestor field, that's the complex collection. Complex fields + cannot be sortable and the sortable property must be null for such fields. The default for + sortable is true for single-valued simple fields, false for multi-valued simple fields, and + null for complex fields. + :type sortable: bool + :param facetable: A value indicating whether to enable the field to be referenced in facet + queries. Typically used in a presentation of search results that includes hit count by category + (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so + on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. 
Default is true for all other simple + fields. + :type facetable: bool + :param analyzer: The name of the language analyzer to use for the field. This option can be + used only with searchable fields and it can't be set together with either searchAnalyzer or + indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null + for complex fields. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', + 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- + Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', + 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', + 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', + 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', + 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', + 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', + 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', + 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', + 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- + PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', + 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', + 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', + 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', + 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', + 'simple', 'stop', 'whitespace'. 
+ :type analyzer: str or ~search_service_client.models.AnalyzerName + :param search_analyzer: The name of the analyzer used at search time for the field. This option + can be used only with searchable fields. It must be set together with indexAnalyzer and it + cannot be set together with the analyzer option. This analyzer can be updated on an existing + field. Must be null for complex fields. Possible values include: 'ar.microsoft', 'ar.lucene', + 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', + 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', + 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', + 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', + 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', + 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', + 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', + 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', + 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', + 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt- + BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', + 'ru.microsoft', 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', + 'sl.microsoft', 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', + 'te.microsoft', 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', + 'ur.microsoft', 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', + 'pattern', 'simple', 'stop', 'whitespace'. 
+ :type search_analyzer: str or ~search_service_client.models.AnalyzerName + :param index_analyzer: The name of the analyzer used at indexing time for the field. This + option can be used only with searchable fields. It must be set together with searchAnalyzer and + it cannot be set together with the analyzer option. Once the analyzer is chosen, it cannot be + changed for the field. Must be null for complex fields. Possible values include: + 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', + 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh- + Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', + 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', + 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', + 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', + 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', + 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', + 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', + 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', + 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', + 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- + cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', + 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', + 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', + 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', + 'whitespace'. 
+ :type index_analyzer: str or ~search_service_client.models.AnalyzerName + :param synonym_maps: A list of the names of synonym maps to associate with this field. This + option can be used only with searchable fields. Currently only one synonym map per field is + supported. Assigning a synonym map to a field ensures that query terms targeting that field are + expanded at query-time using the rules in the synonym map. This attribute can be changed on + existing fields. Must be null or an empty collection for complex fields. + :type synonym_maps: list[str] + :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). Must be null or empty for simple fields. + :type fields: list[~search_service_client.models.Field] + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'bool'}, + 'retrievable': {'key': 'retrievable', 'type': 'bool'}, + 'searchable': {'key': 'searchable', 'type': 'bool'}, + 'filterable': {'key': 'filterable', 'type': 'bool'}, + 'sortable': {'key': 'sortable', 'type': 'bool'}, + 'facetable': {'key': 'facetable', 'type': 'bool'}, + 'analyzer': {'key': 'analyzer', 'type': 'str'}, + 'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'}, + 'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'}, + 'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'}, + 'fields': {'key': 'fields', 'type': '[Field]'}, + } + + def __init__( + self, + **kwargs + ): + super(Field, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) + self.key = kwargs.get('key', None) + self.retrievable = kwargs.get('retrievable', None) + self.searchable = kwargs.get('searchable', None) + self.filterable = kwargs.get('filterable', None) + self.sortable = kwargs.get('sortable', None) + self.facetable = 
kwargs.get('facetable', None) + self.analyzer = kwargs.get('analyzer', None) + self.search_analyzer = kwargs.get('search_analyzer', None) + self.index_analyzer = kwargs.get('index_analyzer', None) + self.synonym_maps = kwargs.get('synonym_maps', None) + self.fields = kwargs.get('fields', None) + + +class FieldMapping(msrest.serialization.Model): + """Defines a mapping between a field in a data source and a target field in an index. + + All required parameters must be populated in order to send to Azure. + + :param source_field_name: Required. The name of the field in the data source. + :type source_field_name: str + :param target_field_name: The name of the target field in the index. Same as the source field + name by default. + :type target_field_name: str + :param mapping_function: A function to apply to each source field value before indexing. + :type mapping_function: ~search_service_client.models.FieldMappingFunction + """ + + _validation = { + 'source_field_name': {'required': True}, + } + + _attribute_map = { + 'source_field_name': {'key': 'sourceFieldName', 'type': 'str'}, + 'target_field_name': {'key': 'targetFieldName', 'type': 'str'}, + 'mapping_function': {'key': 'mappingFunction', 'type': 'FieldMappingFunction'}, + } + + def __init__( + self, + **kwargs + ): + super(FieldMapping, self).__init__(**kwargs) + self.source_field_name = kwargs.get('source_field_name', None) + self.target_field_name = kwargs.get('target_field_name', None) + self.mapping_function = kwargs.get('mapping_function', None) + + +class FieldMappingFunction(msrest.serialization.Model): + """Represents a function that transforms a value from a data source before indexing. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the field mapping function. + :type name: str + :param parameters: A dictionary of parameter name/value pairs to pass to the function. Each + value must be of a primitive type. 
+ :type parameters: dict[str, object] + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '{object}'}, + } + + def __init__( + self, + **kwargs + ): + super(FieldMappingFunction, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.parameters = kwargs.get('parameters', None) + + +class FreshnessScoringFunction(ScoringFunction): + """Defines a function that boosts scores based on the value of a date-time field. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. + :type type: str + :param field_name: Required. The name of the field used as input to the scoring function. + :type field_name: str + :param boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :type boost: float + :param interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', + 'logarithmic'. + :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + :param parameters: Required. Parameter values for the freshness scoring function. 
+ :type parameters: ~search_service_client.models.FreshnessScoringParameters + """ + + _validation = { + 'type': {'required': True}, + 'field_name': {'required': True}, + 'boost': {'required': True}, + 'parameters': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'field_name': {'key': 'fieldName', 'type': 'str'}, + 'boost': {'key': 'boost', 'type': 'float'}, + 'interpolation': {'key': 'interpolation', 'type': 'str'}, + 'parameters': {'key': 'freshness', 'type': 'FreshnessScoringParameters'}, + } + + def __init__( + self, + **kwargs + ): + super(FreshnessScoringFunction, self).__init__(**kwargs) + self.type = 'freshness' + self.parameters = kwargs.get('parameters', None) + + +class FreshnessScoringParameters(msrest.serialization.Model): + """Provides parameter values to a freshness scoring function. + + All required parameters must be populated in order to send to Azure. + + :param boosting_duration: Required. The expiration period after which boosting will stop for a + particular document. + :type boosting_duration: ~datetime.timedelta + """ + + _validation = { + 'boosting_duration': {'required': True}, + } + + _attribute_map = { + 'boosting_duration': {'key': 'boostingDuration', 'type': 'duration'}, + } + + def __init__( + self, + **kwargs + ): + super(FreshnessScoringParameters, self).__init__(**kwargs) + self.boosting_duration = kwargs.get('boosting_duration', None) + + +class GetIndexStatisticsResult(msrest.serialization.Model): + """Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar document_count: Required. The number of documents in the index. + :vartype document_count: long + :ivar storage_size: Required. The amount of storage in bytes consumed by the index. 
+ :vartype storage_size: long + """ + + _validation = { + 'document_count': {'required': True, 'readonly': True}, + 'storage_size': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'document_count': {'key': 'documentCount', 'type': 'long'}, + 'storage_size': {'key': 'storageSize', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(GetIndexStatisticsResult, self).__init__(**kwargs) + self.document_count = None + self.storage_size = None + + +class HighWaterMarkChangeDetectionPolicy(DataChangeDetectionPolicy): + """Defines a data change detection policy that captures changes based on the value of a high water mark column. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param high_water_mark_column_name: Required. The name of the high water mark column. + :type high_water_mark_column_name: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'high_water_mark_column_name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'high_water_mark_column_name': {'key': 'highWaterMarkColumnName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' + self.high_water_mark_column_name = kwargs.get('high_water_mark_column_name', None) + + +class ImageAnalysisSkill(Skill): + """A skill that analyzes image files. It extracts a rich set of visual features based on the image content. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. 
A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param default_language_code: A value indicating which language code to use. Default is en. + Possible values include: 'en', 'es', 'ja', 'pt', 'zh'. + :type default_language_code: str or ~search_service_client.models.ImageAnalysisSkillLanguage + :param visual_features: A list of visual features. + :type visual_features: list[str or ~search_service_client.models.VisualFeature] + :param details: A string indicating which domain-specific details to return. 
+ :type details: list[str or ~search_service_client.models.ImageDetail] + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + 'visual_features': {'key': 'visualFeatures', 'type': '[str]'}, + 'details': {'key': 'details', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(ImageAnalysisSkill, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Skills.Vision.ImageAnalysisSkill' + self.default_language_code = kwargs.get('default_language_code', None) + self.visual_features = kwargs.get('visual_features', None) + self.details = kwargs.get('details', None) + + +class Index(msrest.serialization.Model): + """Represents a search index definition, which describes the fields and search behavior of an index. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the index. + :type name: str + :param fields: Required. The fields of the index. + :type fields: list[~search_service_client.models.Field] + :param scoring_profiles: The scoring profiles for the index. + :type scoring_profiles: list[~search_service_client.models.ScoringProfile] + :param default_scoring_profile: The name of the scoring profile to use if none is specified in + the query. If this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used. 
+ :type default_scoring_profile: str + :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :type cors_options: ~search_service_client.models.CorsOptions + :param suggesters: The suggesters for the index. + :type suggesters: list[~search_service_client.models.Suggester] + :param analyzers: The analyzers for the index. + :type analyzers: list[~search_service_client.models.Analyzer] + :param tokenizers: The tokenizers for the index. + :type tokenizers: list[~search_service_client.models.Tokenizer] + :param token_filters: The token filters for the index. + :type token_filters: list[~search_service_client.models.TokenFilter] + :param char_filters: The character filters for the index. + :type char_filters: list[~search_service_client.models.CharFilter] + :param encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :type encryption_key: ~search_service_client.models.EncryptionKey + :param e_tag: The ETag of the index. 
+ :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'fields': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'fields': {'key': 'fields', 'type': '[Field]'}, + 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, + 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, + 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, + 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, + 'analyzers': {'key': 'analyzers', 'type': '[Analyzer]'}, + 'tokenizers': {'key': 'tokenizers', 'type': '[Tokenizer]'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, + 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Index, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.fields = kwargs.get('fields', None) + self.scoring_profiles = kwargs.get('scoring_profiles', None) + self.default_scoring_profile = kwargs.get('default_scoring_profile', None) + self.cors_options = kwargs.get('cors_options', None) + self.suggesters = kwargs.get('suggesters', None) + self.analyzers = kwargs.get('analyzers', None) + self.tokenizers = kwargs.get('tokenizers', None) + self.token_filters = kwargs.get('token_filters', None) + self.char_filters = kwargs.get('char_filters', None) + self.encryption_key = kwargs.get('encryption_key', None) + self.e_tag = kwargs.get('e_tag', None) + + +class Indexer(msrest.serialization.Model): + """Represents an indexer. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the indexer. + :type name: str + :param description: The description of the indexer. + :type description: str + :param data_source_name: Required. 
The name of the datasource from which this indexer reads + data. + :type data_source_name: str + :param skillset_name: The name of the skillset executing with this indexer. + :type skillset_name: str + :param target_index_name: Required. The name of the index to which this indexer writes data. + :type target_index_name: str + :param schedule: The schedule for this indexer. + :type schedule: ~search_service_client.models.IndexingSchedule + :param parameters: Parameters for indexer execution. + :type parameters: ~search_service_client.models.IndexingParameters + :param field_mappings: Defines mappings between fields in the data source and corresponding + target fields in the index. + :type field_mappings: list[~search_service_client.models.FieldMapping] + :param output_field_mappings: Output field mappings are applied after enrichment and + immediately before indexing. + :type output_field_mappings: list[~search_service_client.models.FieldMapping] + :param is_disabled: A value indicating whether the indexer is disabled. Default is false. + :type is_disabled: bool + :param e_tag: The ETag of the Indexer. 
+ :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'data_source_name': {'required': True}, + 'target_index_name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'data_source_name': {'key': 'dataSourceName', 'type': 'str'}, + 'skillset_name': {'key': 'skillsetName', 'type': 'str'}, + 'target_index_name': {'key': 'targetIndexName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'}, + 'parameters': {'key': 'parameters', 'type': 'IndexingParameters'}, + 'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'}, + 'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'}, + 'is_disabled': {'key': 'disabled', 'type': 'bool'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Indexer, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.description = kwargs.get('description', None) + self.data_source_name = kwargs.get('data_source_name', None) + self.skillset_name = kwargs.get('skillset_name', None) + self.target_index_name = kwargs.get('target_index_name', None) + self.schedule = kwargs.get('schedule', None) + self.parameters = kwargs.get('parameters', None) + self.field_mappings = kwargs.get('field_mappings', None) + self.output_field_mappings = kwargs.get('output_field_mappings', None) + self.is_disabled = kwargs.get('is_disabled', False) + self.e_tag = kwargs.get('e_tag', None) + + +class IndexerExecutionInfo(msrest.serialization.Model): + """Represents the current status and execution history of an indexer. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar status: Required. Overall indexer status. Possible values include: 'unknown', 'error', + 'running'. 
+ :vartype status: str or ~search_service_client.models.IndexerStatus + :ivar last_result: The result of the most recent or an in-progress indexer execution. + :vartype last_result: ~search_service_client.models.IndexerExecutionResult + :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse + chronological order. + :vartype execution_history: list[~search_service_client.models.IndexerExecutionResult] + :ivar limits: Required. The execution limits for the indexer. + :vartype limits: ~search_service_client.models.IndexerLimits + """ + + _validation = { + 'status': {'required': True, 'readonly': True}, + 'last_result': {'readonly': True}, + 'execution_history': {'required': True, 'readonly': True}, + 'limits': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'}, + 'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'}, + 'limits': {'key': 'limits', 'type': 'IndexerLimits'}, + } + + def __init__( + self, + **kwargs + ): + super(IndexerExecutionInfo, self).__init__(**kwargs) + self.status = None + self.last_result = None + self.execution_history = None + self.limits = None + + +class IndexerExecutionResult(msrest.serialization.Model): + """Represents the result of an individual indexer execution. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar status: Required. The outcome of this indexer execution. Possible values include: + 'transientFailure', 'success', 'inProgress', 'reset'. + :vartype status: str or ~search_service_client.models.IndexerExecutionStatus + :ivar error_message: The error message indicating the top-level error, if any. + :vartype error_message: str + :ivar start_time: The start time of this indexer execution. 
+ :vartype start_time: ~datetime.datetime + :ivar end_time: The end time of this indexer execution, if the execution has already completed. + :vartype end_time: ~datetime.datetime + :ivar errors: Required. The item-level indexing errors. + :vartype errors: list[~search_service_client.models.ItemError] + :ivar warnings: Required. The item-level indexing warnings. + :vartype warnings: list[~search_service_client.models.ItemWarning] + :ivar item_count: Required. The number of items that were processed during this indexer + execution. This includes both successfully processed items and items where indexing was + attempted but failed. + :vartype item_count: int + :ivar failed_item_count: Required. The number of items that failed to be indexed during this + indexer execution. + :vartype failed_item_count: int + :ivar initial_tracking_state: Change tracking state with which an indexer execution started. + :vartype initial_tracking_state: str + :ivar final_tracking_state: Change tracking state with which an indexer execution finished. 
    :vartype final_tracking_state: str
    """

    # Every field below is read-only: this model is only deserialized from
    # service responses and is never sent to the service.
    _validation = {
        'status': {'required': True, 'readonly': True},
        'error_message': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
        'errors': {'required': True, 'readonly': True},
        'warnings': {'required': True, 'readonly': True},
        'item_count': {'required': True, 'readonly': True},
        'failed_item_count': {'required': True, 'readonly': True},
        'initial_tracking_state': {'readonly': True},
        'final_tracking_state': {'readonly': True},
    }

    # NOTE: item_count / failed_item_count map to the 'itemsProcessed' /
    # 'itemsFailed' wire names, not a direct camel-casing of the attribute.
    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'error_message': {'key': 'errorMessage', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'errors': {'key': 'errors', 'type': '[ItemError]'},
        'warnings': {'key': 'warnings', 'type': '[ItemWarning]'},
        'item_count': {'key': 'itemsProcessed', 'type': 'int'},
        'failed_item_count': {'key': 'itemsFailed', 'type': 'int'},
        'initial_tracking_state': {'key': 'initialTrackingState', 'type': 'str'},
        'final_tracking_state': {'key': 'finalTrackingState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(IndexerExecutionResult, self).__init__(**kwargs)
        # All attributes start as None; the msrest deserializer fills them in.
        self.status = None
        self.error_message = None
        self.start_time = None
        self.end_time = None
        self.errors = None
        self.warnings = None
        self.item_count = None
        self.failed_item_count = None
        self.initial_tracking_state = None
        self.final_tracking_state = None


class IndexerLimits(msrest.serialization.Model):
    """IndexerLimits.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar max_run_time: The maximum duration that the indexer is permitted to run for one
     execution.
    :vartype max_run_time: ~datetime.timedelta
    :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be
     considered valid for indexing.
    :vartype max_document_extraction_size: long
    :ivar max_document_content_characters_to_extract: The maximum number of characters that will be
     extracted from a document picked up for indexing.
    :vartype max_document_content_characters_to_extract: long
    """

    # Read-only, service-reported quota values; never sent by the client.
    _validation = {
        'max_run_time': {'readonly': True},
        'max_document_extraction_size': {'readonly': True},
        'max_document_content_characters_to_extract': {'readonly': True},
    }

    _attribute_map = {
        'max_run_time': {'key': 'maxRunTime', 'type': 'duration'},
        'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'},
        'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(IndexerLimits, self).__init__(**kwargs)
        self.max_run_time = None
        self.max_document_extraction_size = None
        self.max_document_content_characters_to_extract = None


class IndexingParameters(msrest.serialization.Model):
    """Represents parameters for indexer execution.

    :param batch_size: The number of items that are read from the data source and indexed as a
     single batch in order to improve performance. The default depends on the data source type.
    :type batch_size: int
    :param max_failed_items: The maximum number of items that can fail indexing for indexer
     execution to still be considered successful. -1 means no limit. Default is 0.
    :type max_failed_items: int
    :param max_failed_items_per_batch: The maximum number of items in a single batch that can fail
     indexing for the batch to still be considered successful. -1 means no limit. Default is 0.
    :type max_failed_items_per_batch: int
    :param configuration: A dictionary of indexer-specific configuration properties. Each name is
     the name of a specific property. Each value must be of a primitive type.
    :type configuration: dict[str, object]
    """

    _attribute_map = {
        'batch_size': {'key': 'batchSize', 'type': 'int'},
        'max_failed_items': {'key': 'maxFailedItems', 'type': 'int'},
        'max_failed_items_per_batch': {'key': 'maxFailedItemsPerBatch', 'type': 'int'},
        'configuration': {'key': 'configuration', 'type': '{object}'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(IndexingParameters, self).__init__(**kwargs)
        self.batch_size = kwargs.get('batch_size', None)
        # The failure limits default to 0 client-side, i.e. indexer execution
        # fails on the first item-level error unless the caller opts in to
        # tolerance (-1 means unlimited).
        self.max_failed_items = kwargs.get('max_failed_items', 0)
        self.max_failed_items_per_batch = kwargs.get('max_failed_items_per_batch', 0)
        self.configuration = kwargs.get('configuration', None)


class IndexingSchedule(msrest.serialization.Model):
    """Represents a schedule for indexer execution.

    All required parameters must be populated in order to send to Azure.

    :param interval: Required. The interval of time between indexer executions.
    :type interval: ~datetime.timedelta
    :param start_time: The time when an indexer should start running.
    :type start_time: ~datetime.datetime
    """

    # Only the interval is mandatory; start_time may be omitted.
    _validation = {
        'interval': {'required': True},
    }

    _attribute_map = {
        'interval': {'key': 'interval', 'type': 'duration'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(IndexingSchedule, self).__init__(**kwargs)
        self.interval = kwargs.get('interval', None)
        self.start_time = kwargs.get('start_time', None)


class InputFieldMappingEntry(msrest.serialization.Model):
    """Input field mapping for a skill.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the input.
    :type name: str
    :param source: The source of the input.
    :type source: str
    :param source_context: The source context used for selecting recursive inputs.
    :type source_context: str
    :param inputs: The recursive inputs used when creating a complex type.
    :type inputs: list[~search_service_client.models.InputFieldMappingEntry]
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'source': {'key': 'source', 'type': 'str'},
        'source_context': {'key': 'sourceContext', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(InputFieldMappingEntry, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.source = kwargs.get('source', None)
        self.source_context = kwargs.get('source_context', None)
        # 'inputs' nests further InputFieldMappingEntry instances, allowing a
        # recursive definition for complex (structured) inputs.
        self.inputs = kwargs.get('inputs', None)


class ItemError(msrest.serialization.Model):
    """Represents an item- or document-level indexing error.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar key: The key of the item for which indexing failed.
    :vartype key: str
    :ivar error_message: Required. The message describing the error that occurred while processing
     the item.
    :vartype error_message: str
    :ivar status_code: Required. The status code indicating why the indexing operation failed.
     Possible values include: 400 for a malformed input document, 404 for document not found, 409
     for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the
     service is too busy.
    :vartype status_code: int
    :ivar name: The name of the source at which the error originated. For example, this could refer
     to a particular skill in the attached skillset. This may not always be available.
    :vartype name: str
    :ivar details: Additional, verbose details about the error to assist in debugging the indexer.
     This may not always be available.
    :vartype details: str
    :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This
     may not always be available.
    :vartype documentation_link: str
    """

    # All fields are read-only; 'required' here marks what the service always
    # returns, not what callers must supply.
    _validation = {
        'key': {'readonly': True},
        'error_message': {'required': True, 'readonly': True},
        'status_code': {'required': True, 'readonly': True},
        'name': {'readonly': True},
        'details': {'readonly': True},
        'documentation_link': {'readonly': True},
    }

    _attribute_map = {
        'key': {'key': 'key', 'type': 'str'},
        'error_message': {'key': 'errorMessage', 'type': 'str'},
        'status_code': {'key': 'statusCode', 'type': 'int'},
        'name': {'key': 'name', 'type': 'str'},
        'details': {'key': 'details', 'type': 'str'},
        'documentation_link': {'key': 'documentationLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ItemError, self).__init__(**kwargs)
        self.key = None
        self.error_message = None
        self.status_code = None
        self.name = None
        self.details = None
        self.documentation_link = None


class ItemWarning(msrest.serialization.Model):
    """Represents an item-level warning.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar key: The key of the item which generated a warning.
    :vartype key: str
    :ivar message: Required. The message describing the warning that occurred while processing the
     item.
    :vartype message: str
    :ivar name: The name of the source at which the warning originated. For example, this could
     refer to a particular skill in the attached skillset. This may not always be available.
    :vartype name: str
    :ivar details: Additional, verbose details about the warning to assist in debugging the
     indexer. This may not always be available.
    :vartype details: str
    :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This
     may not always be available.
    :vartype documentation_link: str
    """

    # Read-only response-side model, mirroring ItemError above.
    _validation = {
        'key': {'readonly': True},
        'message': {'required': True, 'readonly': True},
        'name': {'readonly': True},
        'details': {'readonly': True},
        'documentation_link': {'readonly': True},
    }

    _attribute_map = {
        'key': {'key': 'key', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'details': {'key': 'details', 'type': 'str'},
        'documentation_link': {'key': 'documentationLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ItemWarning, self).__init__(**kwargs)
        self.key = None
        self.message = None
        self.name = None
        self.details = None
        self.documentation_link = None


class KeepTokenFilter(TokenFilter):
    """A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the token filter. It must only contain letters, digits,
     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
     limited to 128 characters.
    :type name: str
    :param keep_words: Required. The list of words to keep.
    :type keep_words: list[str]
    :param lower_case_keep_words: A value indicating whether to lower case all words first. Default
     is false.
    :type lower_case_keep_words: bool
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'keep_words': {'required': True},
    }

    # NOTE: the wire name for lower_case_keep_words is 'keepWordsCase', which
    # is not a direct camel-casing of the Python attribute name.
    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'keep_words': {'key': 'keepWords', 'type': '[str]'},
        'lower_case_keep_words': {'key': 'keepWordsCase', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(KeepTokenFilter, self).__init__(**kwargs)
        # The OData discriminator is fixed client-side. 'name' is not assigned
        # here; presumably the TokenFilter base consumes it from **kwargs —
        # confirm against the base class definition.
        self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter'
        self.keep_words = kwargs.get('keep_words', None)
        self.lower_case_keep_words = kwargs.get('lower_case_keep_words', False)


class KeyPhraseExtractionSkill(Skill):
    """A skill that uses text analytics for key phrase extraction.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: The name of the skill which uniquely identifies it within the skillset. A skill
     with no name defined will be given a default name of its 1-based index in the skills array,
     prefixed with the character '#'.
    :type name: str
    :param description: The description of the skill which describes the inputs, outputs, and usage
     of the skill.
    :type description: str
    :param context: Represents the level at which operations take place, such as the document root
     or document content (for example, /document or /document/content). The default is /document.
    :type context: str
    :param inputs: Required. Inputs of the skills could be a column in the source data set, or the
     output of an upstream skill.
    :type inputs: list[~search_service_client.models.InputFieldMappingEntry]
    :param outputs: Required. The output of a skill is either a field in a search index, or a value
     that can be consumed as an input by another skill.
    :type outputs: list[~search_service_client.models.OutputFieldMappingEntry]
    :param default_language_code: A value indicating which language code to use. Default is en.
     Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'it', 'ja', 'ko', 'no', 'pl',
     'pt-PT', 'pt-BR', 'ru', 'es', 'sv'.
    :type default_language_code: str or
     ~search_service_client.models.KeyPhraseExtractionSkillLanguage
    :param max_key_phrase_count: A number indicating how many key phrases to return. If absent, all
     identified key phrases will be returned.
    :type max_key_phrase_count: int
    """

    _validation = {
        'odata_type': {'required': True},
        'inputs': {'required': True},
        'outputs': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'context': {'key': 'context', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
        'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
        'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'},
        'max_key_phrase_count': {'key': 'maxKeyPhraseCount', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(KeyPhraseExtractionSkill, self).__init__(**kwargs)
        # Only the skill-specific fields are set here; the shared fields
        # (name, description, context, inputs, outputs) are forwarded to the
        # Skill base through **kwargs.
        self.odata_type = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill'
        self.default_language_code = kwargs.get('default_language_code', None)
        self.max_key_phrase_count = kwargs.get('max_key_phrase_count', None)


class KeywordMarkerTokenFilter(TokenFilter):
    """Marks terms as keywords. This token filter is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the token filter. It must only contain letters, digits,
     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
     limited to 128 characters.
    :type name: str
    :param keywords: Required. A list of words to mark as keywords.
    :type keywords: list[str]
    :param ignore_case: A value indicating whether to ignore case. If true, all words are converted
     to lower case first. Default is false.
    :type ignore_case: bool
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'keywords': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'keywords': {'key': 'keywords', 'type': '[str]'},
        'ignore_case': {'key': 'ignoreCase', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(KeywordMarkerTokenFilter, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter'
        self.keywords = kwargs.get('keywords', None)
        self.ignore_case = kwargs.get('ignore_case', False)


class KeywordTokenizer(Tokenizer):
    """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
     dashes or underscores, can only start and end with alphanumeric characters, and is limited to
     128 characters.
    :type name: str
    :param buffer_size: The read buffer size in bytes. Default is 256.
    :type buffer_size: int
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'buffer_size': {'key': 'bufferSize', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(KeywordTokenizer, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizer'
        # Client-side default mirrors the documented service default (256).
        self.buffer_size = kwargs.get('buffer_size', 256)


class KeywordTokenizerV2(Tokenizer):
    """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
     dashes or underscores, can only start and end with alphanumeric characters, and is limited to
     128 characters.
    :type name: str
    :param max_token_length: The maximum token length. Default is 256. Tokens longer than the
     maximum length are split. The maximum token length that can be used is 300 characters.
    :type max_token_length: int
    """

    # Unlike KeywordTokenizer, the V2 variant caps max_token_length at 300.
    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'max_token_length': {'maximum': 300},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(KeywordTokenizerV2, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizerV2'
        self.max_token_length = kwargs.get('max_token_length', 256)


class LanguageDetectionSkill(Skill):
    """A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: The name of the skill which uniquely identifies it within the skillset. A skill
     with no name defined will be given a default name of its 1-based index in the skills array,
     prefixed with the character '#'.
    :type name: str
    :param description: The description of the skill which describes the inputs, outputs, and usage
     of the skill.
    :type description: str
    :param context: Represents the level at which operations take place, such as the document root
     or document content (for example, /document or /document/content). The default is /document.
    :type context: str
    :param inputs: Required. Inputs of the skills could be a column in the source data set, or the
     output of an upstream skill.
    :type inputs: list[~search_service_client.models.InputFieldMappingEntry]
    :param outputs: Required. The output of a skill is either a field in a search index, or a value
     that can be consumed as an input by another skill.
    :type outputs: list[~search_service_client.models.OutputFieldMappingEntry]
    """

    _validation = {
        'odata_type': {'required': True},
        'inputs': {'required': True},
        'outputs': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'context': {'key': 'context', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
        'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LanguageDetectionSkill, self).__init__(**kwargs)
        # This skill adds no parameters of its own beyond the discriminator.
        self.odata_type = '#Microsoft.Skills.Text.LanguageDetectionSkill'


class LengthTokenFilter(TokenFilter):
    """Removes words that are too long or too short. This token filter is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the token filter. It must only contain letters, digits,
     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
     limited to 128 characters.
    :type name: str
    :param min: The minimum length in characters. Default is 0. Maximum is 300. Must be less than
     the value of max.
    :type min: int
    :param max: The maximum length in characters. Default and maximum is 300.
    :type max: int
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'min': {'maximum': 300},
        'max': {'maximum': 300},
    }

    # 'min'/'max' shadow the Python builtins, but the names are part of the
    # generated public surface and must stay as-is.
    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'min': {'key': 'min', 'type': 'int'},
        'max': {'key': 'max', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LengthTokenFilter, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter'
        # Client-side defaults mirror the documented service defaults.
        self.min = kwargs.get('min', 0)
        self.max = kwargs.get('max', 300)


class LimitTokenFilter(TokenFilter):
    """Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the token filter. It must only contain letters, digits,
     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
     limited to 128 characters.
    :type name: str
    :param max_token_count: The maximum number of tokens to produce. Default is 1.
    :type max_token_count: int
    :param consume_all_tokens: A value indicating whether all tokens from the input must be
     consumed even if maxTokenCount is reached. Default is false.
    :type consume_all_tokens: bool
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'max_token_count': {'key': 'maxTokenCount', 'type': 'int'},
        'consume_all_tokens': {'key': 'consumeAllTokens', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LimitTokenFilter, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.LimitTokenFilter'
        self.max_token_count = kwargs.get('max_token_count', 1)
        self.consume_all_tokens = kwargs.get('consume_all_tokens', False)


class ListDataSourcesResult(msrest.serialization.Model):
    """Response from a List Datasources request. If successful, it includes the full definitions of all datasources.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar data_sources: Required. The datasources in the Search service.
    :vartype data_sources: list[~search_service_client.models.DataSource]
    """

    _validation = {
        'data_sources': {'required': True, 'readonly': True},
    }

    # The collection is carried in the OData 'value' property on the wire;
    # the same convention applies to the other List*Result models below.
    _attribute_map = {
        'data_sources': {'key': 'value', 'type': '[DataSource]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ListDataSourcesResult, self).__init__(**kwargs)
        self.data_sources = None


class ListIndexersResult(msrest.serialization.Model):
    """Response from a List Indexers request. If successful, it includes the full definitions of all indexers.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar indexers: Required. The indexers in the Search service.
    :vartype indexers: list[~search_service_client.models.Indexer]
    """

    _validation = {
        'indexers': {'required': True, 'readonly': True},
    }

    _attribute_map = {
        'indexers': {'key': 'value', 'type': '[Indexer]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ListIndexersResult, self).__init__(**kwargs)
        self.indexers = None


class ListIndexesResult(msrest.serialization.Model):
    """Response from a List Indexes request. If successful, it includes the full definitions of all indexes.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar indexes: Required. The indexes in the Search service.
    :vartype indexes: list[~search_service_client.models.Index]
    """

    _validation = {
        'indexes': {'required': True, 'readonly': True},
    }

    _attribute_map = {
        'indexes': {'key': 'value', 'type': '[Index]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ListIndexesResult, self).__init__(**kwargs)
        self.indexes = None


class ListSkillsetsResult(msrest.serialization.Model):
    """Response from a list Skillset request. If successful, it includes the full definitions of all skillsets.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar skillsets: Required. The skillsets defined in the Search service.
    :vartype skillsets: list[~search_service_client.models.Skillset]
    """

    _validation = {
        'skillsets': {'required': True, 'readonly': True},
    }

    _attribute_map = {
        'skillsets': {'key': 'value', 'type': '[Skillset]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ListSkillsetsResult, self).__init__(**kwargs)
        self.skillsets = None


class ListSynonymMapsResult(msrest.serialization.Model):
    """Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar synonym_maps: Required. The synonym maps in the Search service.
    :vartype synonym_maps: list[~search_service_client.models.SynonymMap]
    """

    _validation = {
        'synonym_maps': {'required': True, 'readonly': True},
    }

    _attribute_map = {
        'synonym_maps': {'key': 'value', 'type': '[SynonymMap]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ListSynonymMapsResult, self).__init__(**kwargs)
        self.synonym_maps = None


class MagnitudeScoringFunction(ScoringFunction):
    """Defines a function that boosts scores based on the magnitude of a numeric field.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Constant filled by server.
    :type type: str
    :param field_name: Required. The name of the field used as input to the scoring function.
    :type field_name: str
    :param boost: Required. A multiplier for the raw score. Must be a positive number not equal to
     1.0.
    :type boost: float
    :param interpolation: A value indicating how boosting will be interpolated across document
     scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic',
     'logarithmic'.
    :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation
    :param parameters: Required. Parameter values for the magnitude scoring function.
    :type parameters: ~search_service_client.models.MagnitudeScoringParameters
    """

    _validation = {
        'type': {'required': True},
        'field_name': {'required': True},
        'boost': {'required': True},
        'parameters': {'required': True},
    }

    # NOTE: 'parameters' serializes under the wire key 'magnitude' — the
    # discriminator value doubles as the property name in the REST payload.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'field_name': {'key': 'fieldName', 'type': 'str'},
        'boost': {'key': 'boost', 'type': 'float'},
        'interpolation': {'key': 'interpolation', 'type': 'str'},
        'parameters': {'key': 'magnitude', 'type': 'MagnitudeScoringParameters'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MagnitudeScoringFunction, self).__init__(**kwargs)
        self.type = 'magnitude'
        self.parameters = kwargs.get('parameters', None)


class MagnitudeScoringParameters(msrest.serialization.Model):
    """Provides parameter values to a magnitude scoring function.

    All required parameters must be populated in order to send to Azure.

    :param boosting_range_start: Required. The field value at which boosting starts.
    :type boosting_range_start: float
    :param boosting_range_end: Required. The field value at which boosting ends.
    :type boosting_range_end: float
    :param should_boost_beyond_range_by_constant: A value indicating whether to apply a constant
     boost for field values beyond the range end value; default is false.
    :type should_boost_beyond_range_by_constant: bool
    """

    _validation = {
        'boosting_range_start': {'required': True},
        'boosting_range_end': {'required': True},
    }

    _attribute_map = {
        'boosting_range_start': {'key': 'boostingRangeStart', 'type': 'float'},
        'boosting_range_end': {'key': 'boostingRangeEnd', 'type': 'float'},
        'should_boost_beyond_range_by_constant': {'key': 'constantBoostBeyondRange', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MagnitudeScoringParameters, self).__init__(**kwargs)
        self.boosting_range_start = kwargs.get('boosting_range_start', None)
        self.boosting_range_end = kwargs.get('boosting_range_end', None)
        # Left as None (rather than False) so an unset value is simply omitted
        # from the request payload.
        self.should_boost_beyond_range_by_constant = kwargs.get('should_boost_beyond_range_by_constant', None)


class MappingCharFilter(CharFilter):
    """A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the char filter. It must only contain letters, digits,
     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
     limited to 128 characters.
    :type name: str
    :param mappings: Required. A list of mappings of the following format: "a=>b" (all occurrences
     of the character "a" will be replaced with character "b").
    :type mappings: list[str]
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'mappings': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'mappings': {'key': 'mappings', 'type': '[str]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MappingCharFilter, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter'
        self.mappings = kwargs.get('mappings', None)


class MergeSkill(Skill):
    """A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: The name of the skill which uniquely identifies it within the skillset. A skill
     with no name defined will be given a default name of its 1-based index in the skills array,
     prefixed with the character '#'.
    :type name: str
    :param description: The description of the skill which describes the inputs, outputs, and usage
     of the skill.
    :type description: str
    :param context: Represents the level at which operations take place, such as the document root
     or document content (for example, /document or /document/content). The default is /document.
    :type context: str
    :param inputs: Required. Inputs of the skills could be a column in the source data set, or the
     output of an upstream skill.
    :type inputs: list[~search_service_client.models.InputFieldMappingEntry]
    :param outputs: Required. The output of a skill is either a field in a search index, or a value
     that can be consumed as an input by another skill.
    :type outputs: list[~search_service_client.models.OutputFieldMappingEntry]
    :param insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is
     an empty space.
    :type insert_pre_tag: str
    :param insert_post_tag: The tag indicates the end of the merged text. By default, the tag is an
     empty space.
    :type insert_post_tag: str
    """

    _validation = {
        'odata_type': {'required': True},
        'inputs': {'required': True},
        'outputs': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'context': {'key': 'context', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
        'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
        'insert_pre_tag': {'key': 'insertPreTag', 'type': 'str'},
        'insert_post_tag': {'key': 'insertPostTag', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MergeSkill, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Skills.Text.MergeSkill'
        # Default tags are a single space character — the docstring's
        # "empty space" means " ", not "".
        self.insert_pre_tag = kwargs.get('insert_pre_tag', " ")
        self.insert_post_tag = kwargs.get('insert_post_tag', " ")


class MicrosoftLanguageStemmingTokenizer(Tokenizer):
    """Divides text using language-specific rules and reduces words to their base forms.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
     dashes or underscores, can only start and end with alphanumeric characters, and is limited to
     128 characters.
    :type name: str
    :param max_token_length: The maximum token length. Tokens longer than the maximum length are
     split. Maximum token length that can be used is 300 characters. Tokens longer than 300
     characters are first split into tokens of length 300 and then each of those tokens is split
     based on the max token length set. Default is 255.
    :type max_token_length: int
    :param is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
     as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
    :type is_search_tokenizer: bool
    :param language: The language to use. The default is English. Possible values include:
     'arabic', 'bangla', 'bulgarian', 'catalan', 'croatian', 'czech', 'danish', 'dutch', 'english',
     'estonian', 'finnish', 'french', 'german', 'greek', 'gujarati', 'hebrew', 'hindi', 'hungarian',
     'icelandic', 'indonesian', 'italian', 'kannada', 'latvian', 'lithuanian', 'malay', 'malayalam',
     'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi',
     'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovak', 'slovenian', 'spanish',
     'swedish', 'tamil', 'telugu', 'turkish', 'ukrainian', 'urdu'.
    :type language: str or ~search_service_client.models.MicrosoftStemmingTokenizerLanguage
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'max_token_length': {'maximum': 300},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
        'is_search_tokenizer': {'key': 'isSearchTokenizer', 'type': 'bool'},
        'language': {'key': 'language', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftLanguageStemmingTokenizer, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer'
        # Client-side defaults mirror the documented service defaults.
        self.max_token_length = kwargs.get('max_token_length', 255)
        self.is_search_tokenizer = kwargs.get('is_search_tokenizer', False)
        self.language = kwargs.get('language', None)


class MicrosoftLanguageTokenizer(Tokenizer):
    """Divides text using language-specific rules.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
     dashes or underscores, can only start and end with alphanumeric characters, and is limited to
     128 characters.
    :type name: str
    :param max_token_length: The maximum token length. Tokens longer than the maximum length are
     split. Maximum token length that can be used is 300 characters. Tokens longer than 300
     characters are first split into tokens of length 300 and then each of those tokens is split
     based on the max token length set. Default is 255.
    :type max_token_length: int
    :param is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used
     as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.
    :type is_search_tokenizer: bool
    :param language: The language to use. The default is English. Possible values include:
     'bangla', 'bulgarian', 'catalan', 'chineseSimplified', 'chineseTraditional', 'croatian',
     'czech', 'danish', 'dutch', 'english', 'french', 'german', 'greek', 'gujarati', 'hindi',
     'icelandic', 'indonesian', 'italian', 'japanese', 'kannada', 'korean', 'malay', 'malayalam',
     'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi',
     'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovenian', 'spanish', 'swedish',
     'tamil', 'telugu', 'thai', 'ukrainian', 'urdu', 'vietnamese'.
    :type language: str or ~search_service_client.models.MicrosoftTokenizerLanguage
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'max_token_length': {'maximum': 300},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
        'is_search_tokenizer': {'key': 'isSearchTokenizer', 'type': 'bool'},
        'language': {'key': 'language', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftLanguageTokenizer, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer'
        self.max_token_length = kwargs.get('max_token_length', 255)
        self.is_search_tokenizer = kwargs.get('is_search_tokenizer', False)
        self.language = kwargs.get('language', None)


class NGramTokenFilter(TokenFilter):
    """Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the token filter. It must only contain letters, digits,
     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
     limited to 128 characters.
    :type name: str
    :param min_gram: The minimum n-gram length. Default is 1. Must be less than the value of
     maxGram.
    :type min_gram: int
    :param max_gram: The maximum n-gram length. Default is 2.
+ :type max_gram: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(NGramTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilter' + self.min_gram = kwargs.get('min_gram', 1) + self.max_gram = kwargs.get('max_gram', 2) + + +class NGramTokenFilterV2(TokenFilter): + """Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. 
+ :type max_gram: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'min_gram': {'maximum': 300}, + 'max_gram': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(NGramTokenFilterV2, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilterV2' + self.min_gram = kwargs.get('min_gram', 1) + self.max_gram = kwargs.get('max_gram', 2) + + +class NGramTokenizer(Tokenizer): + """Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :type max_gram: int + :param token_chars: Character classes to keep in the tokens. 
+ :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'min_gram': {'maximum': 300}, + 'max_gram': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + 'token_chars': {'key': 'tokenChars', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(NGramTokenizer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.NGramTokenizer' + self.min_gram = kwargs.get('min_gram', 1) + self.max_gram = kwargs.get('max_gram', 2) + self.token_chars = kwargs.get('token_chars', None) + + +class OcrSkill(Skill): + """A skill that extracts text from image files. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param text_extraction_algorithm: A value indicating which algorithm to use for extracting + text. Default is printed. Possible values include: 'printed', 'handwritten'. + :type text_extraction_algorithm: str or ~search_service_client.models.TextExtractionAlgorithm + :param default_language_code: A value indicating which language code to use. Default is en. + Possible values include: 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', + 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr- + Latn', 'sk'. + :type default_language_code: str or ~search_service_client.models.OcrSkillLanguage + :param should_detect_orientation: A value indicating to turn orientation detection on or not. + Default is false. + :type should_detect_orientation: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'text_extraction_algorithm': {'key': 'textExtractionAlgorithm', 'type': 'str'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + 'should_detect_orientation': {'key': 'detectOrientation', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(OcrSkill, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Skills.Vision.OcrSkill' + self.text_extraction_algorithm = kwargs.get('text_extraction_algorithm', None) + self.default_language_code = kwargs.get('default_language_code', None) + self.should_detect_orientation = kwargs.get('should_detect_orientation', False) + + 
+class OutputFieldMappingEntry(msrest.serialization.Model): + """Output field mapping for a skill. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the output defined by the skill. + :type name: str + :param target_name: The target name of the output. It is optional and default to name. + :type target_name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'target_name': {'key': 'targetName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(OutputFieldMappingEntry, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.target_name = kwargs.get('target_name', None) + + +class PathHierarchyTokenizerV2(Tokenizer): + """Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param delimiter: The delimiter character to use. Default is "/". + :type delimiter: str + :param replacement: A value that, if set, replaces the delimiter character. Default is "/". + :type replacement: str + :param max_token_length: The maximum token length. Default and maximum is 300. + :type max_token_length: int + :param reverse_token_order: A value indicating whether to generate tokens in reverse order. + Default is false. + :type reverse_token_order: bool + :param number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. 
+ :type number_of_tokens_to_skip: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'delimiter': {'key': 'delimiter', 'type': 'str'}, + 'replacement': {'key': 'replacement', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + 'reverse_token_order': {'key': 'reverse', 'type': 'bool'}, + 'number_of_tokens_to_skip': {'key': 'skip', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(PathHierarchyTokenizerV2, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' + self.delimiter = kwargs.get('delimiter', "/") + self.replacement = kwargs.get('replacement', "/") + self.max_token_length = kwargs.get('max_token_length', 300) + self.reverse_token_order = kwargs.get('reverse_token_order', False) + self.number_of_tokens_to_skip = kwargs.get('number_of_tokens_to_skip', 0) + + +class PatternAnalyzer(Analyzer): + """Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param lower_case_terms: A value indicating whether terms should be lower-cased. Default is + true. + :type lower_case_terms: bool + :param pattern: A regular expression pattern to match token separators. Default is an + expression that matches one or more whitespace characters. + :type pattern: str + :param flags: Regular expression flags. 
Possible values include: 'CANON_EQ', + 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. + :type flags: str or ~search_service_client.models.RegexFlags + :param stopwords: A list of stopwords. + :type stopwords: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'lower_case_terms': {'key': 'lowercase', 'type': 'bool'}, + 'pattern': {'key': 'pattern', 'type': 'str'}, + 'flags': {'key': 'flags', 'type': 'str'}, + 'stopwords': {'key': 'stopwords', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(PatternAnalyzer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.PatternAnalyzer' + self.lower_case_terms = kwargs.get('lower_case_terms', True) + self.pattern = kwargs.get('pattern', r"\W+") + self.flags = kwargs.get('flags', None) + self.stopwords = kwargs.get('stopwords', None) + + +class PatternCaptureTokenFilter(TokenFilter): + """Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param patterns: Required. A list of patterns to match against each token. + :type patterns: list[str] + :param preserve_original: A value indicating whether to return the original token even if one + of the patterns matches. Default is true.
+ :type preserve_original: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'patterns': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'patterns': {'key': 'patterns', 'type': '[str]'}, + 'preserve_original': {'key': 'preserveOriginal', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(PatternCaptureTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' + self.patterns = kwargs.get('patterns', None) + self.preserve_original = kwargs.get('preserve_original', True) + + +class PatternReplaceCharFilter(CharFilter): + r"""A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the char filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param pattern: Required. A regular expression pattern. + :type pattern: str + :param replacement: Required. The replacement text.
+ :type replacement: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'pattern': {'required': True}, + 'replacement': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'pattern': {'key': 'pattern', 'type': 'str'}, + 'replacement': {'key': 'replacement', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(PatternReplaceCharFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter' + self.pattern = kwargs.get('pattern', None) + self.replacement = kwargs.get('replacement', None) + + +class PatternReplaceTokenFilter(TokenFilter): + r"""A token filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param pattern: Required. A regular expression pattern. + :type pattern: str + :param replacement: Required. The replacement text.
+ :type replacement: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'pattern': {'required': True}, + 'replacement': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'pattern': {'key': 'pattern', 'type': 'str'}, + 'replacement': {'key': 'replacement', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(PatternReplaceTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' + self.pattern = kwargs.get('pattern', None) + self.replacement = kwargs.get('replacement', None) + + +class PatternTokenizer(Tokenizer): + """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param pattern: A regular expression pattern to match token separators. Default is an + expression that matches one or more whitespace characters. + :type pattern: str + :param flags: Regular expression flags. Possible values include: 'CANON_EQ', + 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. + :type flags: str or ~search_service_client.models.RegexFlags + :param group: The zero-based ordinal of the matching group in the regular expression pattern to + extract into tokens. Use -1 if you want to use the entire pattern to split the input into + tokens, irrespective of matching groups. Default is -1. 
+ :type group: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'pattern': {'key': 'pattern', 'type': 'str'}, + 'flags': {'key': 'flags', 'type': 'str'}, + 'group': {'key': 'group', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(PatternTokenizer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer' + self.pattern = kwargs.get('pattern', r"\W+") + self.flags = kwargs.get('flags', None) + self.group = kwargs.get('group', -1) + + +class PhoneticTokenFilter(TokenFilter): + """Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: + 'metaphone', 'doubleMetaphone', 'soundex', 'refinedSoundex', 'caverphone1', 'caverphone2', + 'cologne', 'nysiis', 'koelnerPhonetik', 'haasePhonetik', 'beiderMorse'. + :type encoder: str or ~search_service_client.models.PhoneticEncoder + :param replace_original_tokens: A value indicating whether encoded tokens should replace + original tokens. If false, encoded tokens are added as synonyms. Default is true.
+ :type replace_original_tokens: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'encoder': {'key': 'encoder', 'type': 'str'}, + 'replace_original_tokens': {'key': 'replace', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(PhoneticTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' + self.encoder = kwargs.get('encoder', None) + self.replace_original_tokens = kwargs.get('replace_original_tokens', True) + + +class RequestOptions(msrest.serialization.Model): + """Parameter group. + + :param x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :type x_ms_client_request_id: str + """ + + _attribute_map = { + 'x_ms_client_request_id': {'key': 'x-ms-client-request-id', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(RequestOptions, self).__init__(**kwargs) + self.x_ms_client_request_id = kwargs.get('x_ms_client_request_id', None) + + +class ResourceCounter(msrest.serialization.Model): + """Represents a resource's usage and quota. + + All required parameters must be populated in order to send to Azure. + + :param usage: Required. The resource usage amount. + :type usage: long + :param quota: The resource amount quota. + :type quota: long + """ + + _validation = { + 'usage': {'required': True}, + } + + _attribute_map = { + 'usage': {'key': 'usage', 'type': 'long'}, + 'quota': {'key': 'quota', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(ResourceCounter, self).__init__(**kwargs) + self.usage = kwargs.get('usage', None) + self.quota = kwargs.get('quota', None) + + +class ScoringProfile(msrest.serialization.Model): + """Defines parameters for a search index that influence scoring in search queries. 
+ + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the scoring profile. + :type name: str + :param text_weights: Parameters that boost scoring based on text matches in certain index + fields. + :type text_weights: ~search_service_client.models.TextWeights + :param functions: The collection of functions that influence the scoring of documents. + :type functions: list[~search_service_client.models.ScoringFunction] + :param function_aggregation: A value indicating how the results of individual scoring functions + should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible + values include: 'sum', 'average', 'minimum', 'maximum', 'firstMatching'. + :type function_aggregation: str or ~search_service_client.models.ScoringFunctionAggregation + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'text_weights': {'key': 'text', 'type': 'TextWeights'}, + 'functions': {'key': 'functions', 'type': '[ScoringFunction]'}, + 'function_aggregation': {'key': 'functionAggregation', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ScoringProfile, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.text_weights = kwargs.get('text_weights', None) + self.functions = kwargs.get('functions', None) + self.function_aggregation = kwargs.get('function_aggregation', None) + + +class SearchError(msrest.serialization.Model): + """Describes an error condition for the Azure Cognitive Search API. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar code: One of a server-defined set of error codes. + :vartype code: str + :ivar message: Required. A human-readable representation of the error. 
+ :vartype message: str + :ivar details: An array of details about specific errors that led to this reported error. + :vartype details: list[~search_service_client.models.SearchError] + """ + + _validation = { + 'code': {'readonly': True}, + 'message': {'required': True, 'readonly': True}, + 'details': {'readonly': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[SearchError]'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchError, self).__init__(**kwargs) + self.code = None + self.message = None + self.details = None + + +class SentimentSkill(Skill): + """Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param default_language_code: A value indicating which language code to use. Default is en. + Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'it', 'no', 'pl', 'pt-PT', + 'ru', 'es', 'sv', 'tr'. + :type default_language_code: str or ~search_service_client.models.SentimentSkillLanguage + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SentimentSkill, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Skills.Text.SentimentSkill' + self.default_language_code = kwargs.get('default_language_code', None) + + +class ServiceCounters(msrest.serialization.Model): + """Represents service-level resource counters and quotas. + + All required parameters must be populated in order to send to Azure. + + :param document_counter: Required. Total number of documents across all indexes in the service. + :type document_counter: ~search_service_client.models.ResourceCounter + :param index_counter: Required. Total number of indexes. + :type index_counter: ~search_service_client.models.ResourceCounter + :param indexer_counter: Required. Total number of indexers. + :type indexer_counter: ~search_service_client.models.ResourceCounter + :param data_source_counter: Required. Total number of data sources. + :type data_source_counter: ~search_service_client.models.ResourceCounter + :param storage_size_counter: Required. 
Total size of used storage in bytes. + :type storage_size_counter: ~search_service_client.models.ResourceCounter + :param synonym_map_counter: Required. Total number of synonym maps. + :type synonym_map_counter: ~search_service_client.models.ResourceCounter + :param skillset_counter: Required. Total number of skillsets. + :type skillset_counter: ~search_service_client.models.ResourceCounter + """ + + _validation = { + 'document_counter': {'required': True}, + 'index_counter': {'required': True}, + 'indexer_counter': {'required': True}, + 'data_source_counter': {'required': True}, + 'storage_size_counter': {'required': True}, + 'synonym_map_counter': {'required': True}, + 'skillset_counter': {'required': True}, + } + + _attribute_map = { + 'document_counter': {'key': 'documentCount', 'type': 'ResourceCounter'}, + 'index_counter': {'key': 'indexesCount', 'type': 'ResourceCounter'}, + 'indexer_counter': {'key': 'indexersCount', 'type': 'ResourceCounter'}, + 'data_source_counter': {'key': 'dataSourcesCount', 'type': 'ResourceCounter'}, + 'storage_size_counter': {'key': 'storageSize', 'type': 'ResourceCounter'}, + 'synonym_map_counter': {'key': 'synonymMaps', 'type': 'ResourceCounter'}, + 'skillset_counter': {'key': 'skillsetCount', 'type': 'ResourceCounter'}, + } + + def __init__( + self, + **kwargs + ): + super(ServiceCounters, self).__init__(**kwargs) + self.document_counter = kwargs.get('document_counter', None) + self.index_counter = kwargs.get('index_counter', None) + self.indexer_counter = kwargs.get('indexer_counter', None) + self.data_source_counter = kwargs.get('data_source_counter', None) + self.storage_size_counter = kwargs.get('storage_size_counter', None) + self.synonym_map_counter = kwargs.get('synonym_map_counter', None) + self.skillset_counter = kwargs.get('skillset_counter', None) + + +class ServiceLimits(msrest.serialization.Model): + """Represents various service level limits. + + :param max_fields_per_index: The maximum allowed fields per index. 
+ :type max_fields_per_index: int + :param max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in an + index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. + :type max_field_nesting_depth_per_index: int + :param max_complex_collection_fields_per_index: The maximum number of fields of type + Collection(Edm.ComplexType) allowed in an index. + :type max_complex_collection_fields_per_index: int + :param max_complex_objects_in_collections_per_document: The maximum number of objects in + complex collections allowed per document. + :type max_complex_objects_in_collections_per_document: int + """ + + _attribute_map = { + 'max_fields_per_index': {'key': 'maxFieldsPerIndex', 'type': 'int'}, + 'max_field_nesting_depth_per_index': {'key': 'maxFieldNestingDepthPerIndex', 'type': 'int'}, + 'max_complex_collection_fields_per_index': {'key': 'maxComplexCollectionFieldsPerIndex', 'type': 'int'}, + 'max_complex_objects_in_collections_per_document': {'key': 'maxComplexObjectsInCollectionsPerDocument', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(ServiceLimits, self).__init__(**kwargs) + self.max_fields_per_index = kwargs.get('max_fields_per_index', None) + self.max_field_nesting_depth_per_index = kwargs.get('max_field_nesting_depth_per_index', None) + self.max_complex_collection_fields_per_index = kwargs.get('max_complex_collection_fields_per_index', None) + self.max_complex_objects_in_collections_per_document = kwargs.get('max_complex_objects_in_collections_per_document', None) + + +class ServiceStatistics(msrest.serialization.Model): + """Response from a get service statistics request. If successful, it includes service level counters and limits. + + All required parameters must be populated in order to send to Azure. + + :param counters: Required. Service level resource counters. + :type counters: ~search_service_client.models.ServiceCounters + :param limits: Required. 
Service level general limits. + :type limits: ~search_service_client.models.ServiceLimits + """ + + _validation = { + 'counters': {'required': True}, + 'limits': {'required': True}, + } + + _attribute_map = { + 'counters': {'key': 'counters', 'type': 'ServiceCounters'}, + 'limits': {'key': 'limits', 'type': 'ServiceLimits'}, + } + + def __init__( + self, + **kwargs + ): + super(ServiceStatistics, self).__init__(**kwargs) + self.counters = kwargs.get('counters', None) + self.limits = kwargs.get('limits', None) + + +class ShaperSkill(Skill): + """A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + } + + def __init__( + self, + **kwargs + ): + super(ShaperSkill, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Skills.Util.ShaperSkill' + + +class ShingleTokenFilter(TokenFilter): + """Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param max_shingle_size: The maximum shingle size. Default and minimum value is 2. + :type max_shingle_size: int + :param min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be less + than the value of maxShingleSize. + :type min_shingle_size: int + :param output_unigrams: A value indicating whether the output stream will contain the input + tokens (unigrams) as well as shingles. Default is true. + :type output_unigrams: bool + :param output_unigrams_if_no_shingles: A value indicating whether to output unigrams for those + times when no shingles are available. This property takes precedence when outputUnigrams is set + to false. Default is false. 
+ :type output_unigrams_if_no_shingles: bool + :param token_separator: The string to use when joining adjacent tokens to form a shingle. + Default is a single space (" "). + :type token_separator: str + :param filter_token: The string to insert for each position at which there is no token. Default + is an underscore ("_"). + :type filter_token: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_shingle_size': {'minimum': 2}, + 'min_shingle_size': {'minimum': 2}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_shingle_size': {'key': 'maxShingleSize', 'type': 'int'}, + 'min_shingle_size': {'key': 'minShingleSize', 'type': 'int'}, + 'output_unigrams': {'key': 'outputUnigrams', 'type': 'bool'}, + 'output_unigrams_if_no_shingles': {'key': 'outputUnigramsIfNoShingles', 'type': 'bool'}, + 'token_separator': {'key': 'tokenSeparator', 'type': 'str'}, + 'filter_token': {'key': 'filterToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ShingleTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.ShingleTokenFilter' + self.max_shingle_size = kwargs.get('max_shingle_size', 2) + self.min_shingle_size = kwargs.get('min_shingle_size', 2) + self.output_unigrams = kwargs.get('output_unigrams', True) + self.output_unigrams_if_no_shingles = kwargs.get('output_unigrams_if_no_shingles', False) + self.token_separator = kwargs.get('token_separator', " ") + self.filter_token = kwargs.get('filter_token', "_") + + +class Skillset(msrest.serialization.Model): + """A list of skills. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the skillset. + :type name: str + :param description: Required. The description of the skillset. + :type description: str + :param skills: Required. A list of skills in the skillset. 
+ :type skills: list[~search_service_client.models.Skill] + :param cognitive_services_account: Details about cognitive services to be used when running + skills. + :type cognitive_services_account: ~search_service_client.models.CognitiveServicesAccount + :param e_tag: The ETag of the skillset. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'description': {'required': True}, + 'skills': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'skills': {'key': 'skills', 'type': '[Skill]'}, + 'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Skillset, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.description = kwargs.get('description', None) + self.skills = kwargs.get('skills', None) + self.cognitive_services_account = kwargs.get('cognitive_services_account', None) + self.e_tag = kwargs.get('e_tag', None) + + +class SnowballTokenFilter(TokenFilter): + """A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param language: Required. The language to use. Possible values include: 'armenian', 'basque', + 'catalan', 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'german2', 'hungarian', + 'italian', 'kp', 'lovins', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian', + 'spanish', 'swedish', 'turkish'. 
+ :type language: str or ~search_service_client.models.SnowballTokenFilterLanguage + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'language': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'language': {'key': 'language', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SnowballTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' + self.language = kwargs.get('language', None) + + +class SoftDeleteColumnDeletionDetectionPolicy(DataDeletionDetectionPolicy): + """Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param soft_delete_column_name: The name of the column to use for soft-deletion detection. + :type soft_delete_column_name: str + :param soft_delete_marker_value: The marker value that identifies an item as deleted. 
+ :type soft_delete_marker_value: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'soft_delete_column_name': {'key': 'softDeleteColumnName', 'type': 'str'}, + 'soft_delete_marker_value': {'key': 'softDeleteMarkerValue', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SoftDeleteColumnDeletionDetectionPolicy, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' + self.soft_delete_column_name = kwargs.get('soft_delete_column_name', None) + self.soft_delete_marker_value = kwargs.get('soft_delete_marker_value', None) + + +class SplitSkill(Skill): + """A skill to split a string into chunks of text. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param default_language_code: A value indicating which language code to use. Default is en. + Possible values include: 'da', 'de', 'en', 'es', 'fi', 'fr', 'it', 'ko', 'pt'. + :type default_language_code: str or ~search_service_client.models.SplitSkillLanguage + :param text_split_mode: A value indicating which split mode to perform. Possible values + include: 'pages', 'sentences'. + :type text_split_mode: str or ~search_service_client.models.TextSplitMode + :param maximum_page_length: The desired maximum page length. Default is 10000. + :type maximum_page_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + 'text_split_mode': {'key': 'textSplitMode', 'type': 'str'}, + 'maximum_page_length': {'key': 'maximumPageLength', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(SplitSkill, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Skills.Text.SplitSkill' + self.default_language_code = kwargs.get('default_language_code', None) + self.text_split_mode = kwargs.get('text_split_mode', None) + self.maximum_page_length = kwargs.get('maximum_page_length', None) + + +class SqlIntegratedChangeTrackingPolicy(DataChangeDetectionPolicy): + """Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. + + All required parameters must be populated in order to send to Azure. 
+ + :param odata_type: Required. Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SqlIntegratedChangeTrackingPolicy, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' + + +class StandardAnalyzer(Analyzer): + """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :type max_token_length: int + :param stopwords: A list of stopwords. + :type stopwords: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + 'stopwords': {'key': 'stopwords', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(StandardAnalyzer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' + self.max_token_length = kwargs.get('max_token_length', 255) + self.stopwords = kwargs.get('stopwords', None) + + +class StandardTokenizer(Tokenizer): + """Breaks text following the Unicode Text Segmentation rules. 
This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. + :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(StandardTokenizer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' + self.max_token_length = kwargs.get('max_token_length', 255) + + +class StandardTokenizerV2(Tokenizer): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. 
+ :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(StandardTokenizerV2, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' + self.max_token_length = kwargs.get('max_token_length', 255) + + +class StemmerOverrideTokenFilter(TokenFilter): + """Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param rules: Required. A list of stemming rules in the following format: "word => stem", for + example: "ran => run". 
+ :type rules: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'rules': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'rules': {'key': 'rules', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(StemmerOverrideTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' + self.rules = kwargs.get('rules', None) + + +class StemmerTokenFilter(TokenFilter): + """Language specific stemming filter. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param language: Required. The language to use. Possible values include: 'arabic', 'armenian', + 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'dutchKp', + 'english', 'lightEnglish', 'minimalEnglish', 'possessiveEnglish', 'porter2', 'lovins', + 'finnish', 'lightFinnish', 'french', 'lightFrench', 'minimalFrench', 'galician', + 'minimalGalician', 'german', 'german2', 'lightGerman', 'minimalGerman', 'greek', 'hindi', + 'hungarian', 'lightHungarian', 'indonesian', 'irish', 'italian', 'lightItalian', 'sorani', + 'latvian', 'norwegian', 'lightNorwegian', 'minimalNorwegian', 'lightNynorsk', 'minimalNynorsk', + 'portuguese', 'lightPortuguese', 'minimalPortuguese', 'portugueseRslp', 'romanian', 'russian', + 'lightRussian', 'spanish', 'lightSpanish', 'swedish', 'lightSwedish', 'turkish'. 
+ :type language: str or ~search_service_client.models.StemmerTokenFilterLanguage + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'language': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'language': {'key': 'language', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(StemmerTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' + self.language = kwargs.get('language', None) + + +class StopAnalyzer(Analyzer): + """Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param stopwords: A list of stopwords. + :type stopwords: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'stopwords': {'key': 'stopwords', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(StopAnalyzer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StopAnalyzer' + self.stopwords = kwargs.get('stopwords', None) + + +class StopwordsTokenFilter(TokenFilter): + """Removes stop words from a token stream. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. 
+ :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param stopwords: The list of stopwords. This property and the stopwords list property cannot + both be set. + :type stopwords: list[str] + :param stopwords_list: A predefined list of stopwords to use. This property and the stopwords + property cannot both be set. Default is English. Possible values include: 'arabic', 'armenian', + 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'english', + 'finnish', 'french', 'galician', 'german', 'greek', 'hindi', 'hungarian', 'indonesian', + 'irish', 'italian', 'latvian', 'norwegian', 'persian', 'portuguese', 'romanian', 'russian', + 'sorani', 'spanish', 'swedish', 'thai', 'turkish'. + :type stopwords_list: str or ~search_service_client.models.StopwordsList + :param ignore_case: A value indicating whether to ignore case. If true, all words are converted + to lower case first. Default is false. + :type ignore_case: bool + :param remove_trailing_stop_words: A value indicating whether to ignore the last search term if + it's a stop word. Default is true. 
+ :type remove_trailing_stop_words: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'stopwords': {'key': 'stopwords', 'type': '[str]'}, + 'stopwords_list': {'key': 'stopwordsList', 'type': 'str'}, + 'ignore_case': {'key': 'ignoreCase', 'type': 'bool'}, + 'remove_trailing_stop_words': {'key': 'removeTrailing', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(StopwordsTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StopwordsTokenFilter' + self.stopwords = kwargs.get('stopwords', None) + self.stopwords_list = kwargs.get('stopwords_list', None) + self.ignore_case = kwargs.get('ignore_case', False) + self.remove_trailing_stop_words = kwargs.get('remove_trailing_stop_words', True) + + +class Suggester(msrest.serialization.Model): + """Defines how the Suggest API should apply to a group of fields in the index. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the suggester. + :type name: str + :ivar search_mode: Required. A value indicating the capabilities of the suggester. Default + value: "analyzingInfixMatching". + :vartype search_mode: str + :param source_fields: Required. The list of field names to which the suggester applies. Each + field must be searchable. 
+ :type source_fields: list[str] + """ + + _validation = { + 'name': {'required': True}, + 'search_mode': {'required': True, 'constant': True}, + 'source_fields': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'search_mode': {'key': 'searchMode', 'type': 'str'}, + 'source_fields': {'key': 'sourceFields', 'type': '[str]'}, + } + + search_mode = "analyzingInfixMatching" + + def __init__( + self, + **kwargs + ): + super(Suggester, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.source_fields = kwargs.get('source_fields', None) + + +class SynonymMap(msrest.serialization.Model): + """Represents a synonym map definition. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the synonym map. + :type name: str + :ivar format: Required. The format of the synonym map. Only the 'solr' format is currently + supported. Default value: "solr". + :vartype format: str + :param synonyms: Required. A series of synonym rules in the specified synonym map format. The + rules must be separated by newlines. + :type synonyms: str + :param encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. 
class SynonymTokenFilter(TokenFilter):
    """Matches single- or multi-word synonyms in a token stream (Apache Lucene).

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. Token filter name: letters, digits, spaces, dashes or
        underscores only; must start and end alphanumeric; at most 128 characters.
    :type name: str
    :param synonyms: Required. Synonym rules in one of two formats:
        ``"incredible, unbelievable => amazing"`` (left side replaced by right) or
        ``"incredible, unbelievable, amazing"`` (equivalence list; see ``expand``).
    :type synonyms: list[str]
    :param ignore_case: Whether to case-fold input for matching. Default is false.
    :type ignore_case: bool
    :param expand: Whether all words in an equivalence list (no ``=>``) map to one
        another (true) or only to the first word (false). Default is true.
    :type expand: bool
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'synonyms': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'synonyms': {'key': 'synonyms', 'type': '[str]'},
        'ignore_case': {'key': 'ignoreCase', 'type': 'bool'},
        'expand': {'key': 'expand', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(SynonymTokenFilter, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter'
        self.synonyms = kwargs.get('synonyms', None)
        self.ignore_case = kwargs.get('ignore_case', False)
        self.expand = kwargs.get('expand', True)


class TagScoringFunction(ScoringFunction):
    """Boosts scores of documents whose string values match a given list of tags.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Constant filled by server.
    :type type: str
    :param field_name: Required. Name of the field used as input to the function.
    :type field_name: str
    :param boost: Required. Multiplier for the raw score; positive and not 1.0.
    :type boost: float
    :param interpolation: How boosting is interpolated across document scores;
        defaults to "Linear". Possible values: 'linear', 'constant', 'quadratic',
        'logarithmic'.
    :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation
    :param parameters: Required. Parameter values for the tag scoring function.
    :type parameters: ~search_service_client.models.TagScoringParameters
    """

    _validation = {
        'type': {'required': True},
        'field_name': {'required': True},
        'boost': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'field_name': {'key': 'fieldName', 'type': 'str'},
        'boost': {'key': 'boost', 'type': 'float'},
        'interpolation': {'key': 'interpolation', 'type': 'str'},
        'parameters': {'key': 'tag', 'type': 'TagScoringParameters'},
    }

    def __init__(self, **kwargs):
        super(TagScoringFunction, self).__init__(**kwargs)
        self.type = 'tag'
        self.parameters = kwargs.get('parameters', None)


class TagScoringParameters(msrest.serialization.Model):
    """Parameter values for a tag scoring function.

    All required parameters must be populated in order to send to Azure.

    :param tags_parameter: Required. Name of the query parameter that carries the
        list of tags to compare against the target field.
    :type tags_parameter: str
    """

    _validation = {
        'tags_parameter': {'required': True},
    }

    _attribute_map = {
        'tags_parameter': {'key': 'tagsParameter', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TagScoringParameters, self).__init__(**kwargs)
        self.tags_parameter = kwargs.get('tags_parameter', None)


class TextTranslationSkill(Skill):
    """A skill that translates text from one language to another.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Skill name, unique within the skillset. Unnamed skills get a
        default name of their 1-based index prefixed with '#'.
    :type name: str
    :param description: Description of the skill's inputs, outputs, and usage.
    :type description: str
    :param context: Level at which operations take place (e.g. /document or
        /document/content). Default is /document.
    :type context: str
    :param inputs: Required. Inputs: a source column or an upstream skill output.
    :type inputs: list[~search_service_client.models.InputFieldMappingEntry]
    :param outputs: Required. Outputs: an index field or an input to another skill.
    :type outputs: list[~search_service_client.models.OutputFieldMappingEntry]
    :param default_to_language_code: Required. Target language code for documents
        that do not specify one explicitly (e.g. 'en', 'fr', 'zh-Hans', ...).
    :type default_to_language_code: str or
        ~search_service_client.models.TextTranslationSkillLanguage
    :param default_from_language_code: Source language code for documents that do
        not specify one explicitly.
    :type default_from_language_code: str or
        ~search_service_client.models.TextTranslationSkillLanguage
    :param suggested_from: Source language code used when neither the
        fromLanguageCode input nor defaultFromLanguageCode is provided and
        automatic language detection fails. Default is en.
    :type suggested_from: str or ~search_service_client.models.TextTranslationSkillLanguage
    """

    _validation = {
        'odata_type': {'required': True},
        'inputs': {'required': True},
        'outputs': {'required': True},
        'default_to_language_code': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'context': {'key': 'context', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
        'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
        'default_to_language_code': {'key': 'defaultToLanguageCode', 'type': 'str'},
        'default_from_language_code': {'key': 'defaultFromLanguageCode', 'type': 'str'},
        'suggested_from': {'key': 'suggestedFrom', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TextTranslationSkill, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Skills.Text.TranslationSkill'
        self.default_to_language_code = kwargs.get('default_to_language_code', None)
        self.default_from_language_code = kwargs.get('default_from_language_code', None)
        self.suggested_from = kwargs.get('suggested_from', None)


class TextWeights(msrest.serialization.Model):
    """Per-field weights used to boost document scoring in search queries.

    All required parameters must be populated in order to send to Azure.

    :param weights: Required. Mapping of field names to boost weights.
    :type weights: dict[str, float]
    """

    _validation = {
        'weights': {'required': True},
    }

    _attribute_map = {
        'weights': {'key': 'weights', 'type': '{float}'},
    }

    def __init__(self, **kwargs):
        super(TextWeights, self).__init__(**kwargs)
        self.weights = kwargs.get('weights', None)


class TokenInfo(msrest.serialization.Model):
    """Information about a token returned by an analyzer.

    Variables are only populated by the server, and will be ignored when sending
    a request.

    :ivar token: Required. The token returned by the analyzer.
    :vartype token: str
    :ivar start_offset: Required. Index of the token's first character in the input.
    :vartype start_offset: int
    :ivar end_offset: Required. Index of the token's last character in the input.
    :vartype end_offset: int
    :ivar position: Required. Position of the token relative to other tokens
        (first token is 0; synonyms may share a position).
    :vartype position: int
    """

    _validation = {
        'token': {'required': True, 'readonly': True},
        'start_offset': {'required': True, 'readonly': True},
        'end_offset': {'required': True, 'readonly': True},
        'position': {'required': True, 'readonly': True},
    }

    _attribute_map = {
        'token': {'key': 'token', 'type': 'str'},
        'start_offset': {'key': 'startOffset', 'type': 'int'},
        'end_offset': {'key': 'endOffset', 'type': 'int'},
        'position': {'key': 'position', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(TokenInfo, self).__init__(**kwargs)
        # Read-only fields: always server-populated, never serialized outbound.
        self.token = None
        self.start_offset = None
        self.end_offset = None
        self.position = None


class TruncateTokenFilter(TokenFilter):
    """Truncates terms to a specific length (Apache Lucene).

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. Token filter name: letters, digits, spaces, dashes or
        underscores only; must start and end alphanumeric; at most 128 characters.
    :type name: str
    :param length: Truncation length. Default and maximum is 300.
    :type length: int
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'length': {'maximum': 300},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'length': {'key': 'length', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(TruncateTokenFilter, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.TruncateTokenFilter'
        self.length = kwargs.get('length', 300)


class UaxUrlEmailTokenizer(Tokenizer):
    """Tokenizes urls and emails as one token (Apache Lucene).

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. Tokenizer name: letters, digits, spaces, dashes or
        underscores only; must start and end alphanumeric; at most 128 characters.
    :type name: str
    :param max_token_length: Maximum token length; longer tokens are split.
        Default is 255; maximum usable value is 300.
    :type max_token_length: int
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'max_token_length': {'maximum': 300},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(UaxUrlEmailTokenizer, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer'
        self.max_token_length = kwargs.get('max_token_length', 255)


class UniqueTokenFilter(TokenFilter):
    """Filters out tokens with the same text as the previous token (Apache Lucene).

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. Token filter name: letters, digits, spaces, dashes or
        underscores only; must start and end alphanumeric; at most 128 characters.
    :type name: str
    :param only_on_same_position: Remove duplicates only at the same position.
        Default is false.
    :type only_on_same_position: bool
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'only_on_same_position': {'key': 'onlyOnSamePosition', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(UniqueTokenFilter, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.UniqueTokenFilter'
        self.only_on_same_position = kwargs.get('only_on_same_position', False)


class WebApiSkill(Skill):
    """A skill that calls a Web API endpoint, extending a skillset with custom code.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Skill name, unique within the skillset. Unnamed skills get a
        default name of their 1-based index prefixed with '#'.
    :type name: str
    :param description: Description of the skill's inputs, outputs, and usage.
    :type description: str
    :param context: Level at which operations take place (e.g. /document or
        /document/content). Default is /document.
    :type context: str
    :param inputs: Required. Inputs: a source column or an upstream skill output.
    :type inputs: list[~search_service_client.models.InputFieldMappingEntry]
    :param outputs: Required. Outputs: an index field or an input to another skill.
    :type outputs: list[~search_service_client.models.OutputFieldMappingEntry]
    :param uri: Required. The url for the Web API.
    :type uri: str
    :param http_headers: Headers required to make the http request.
    :type http_headers: dict[str, str]
    :param http_method: Method for the http request.
    :type http_method: str
    :param timeout: Desired request timeout. Default is 30 seconds.
    :type timeout: ~datetime.timedelta
    :param batch_size: Desired batch size (number of documents).
    :type batch_size: int
    :param degree_of_parallelism: If set, number of parallel calls to the Web API.
    :type degree_of_parallelism: int
    """

    _validation = {
        'odata_type': {'required': True},
        'inputs': {'required': True},
        'outputs': {'required': True},
        'uri': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'context': {'key': 'context', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
        'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
        'uri': {'key': 'uri', 'type': 'str'},
        'http_headers': {'key': 'httpHeaders', 'type': '{str}'},
        'http_method': {'key': 'httpMethod', 'type': 'str'},
        'timeout': {'key': 'timeout', 'type': 'duration'},
        'batch_size': {'key': 'batchSize', 'type': 'int'},
        'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(WebApiSkill, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill'
        self.uri = kwargs.get('uri', None)
        self.http_headers = kwargs.get('http_headers', None)
        self.http_method = kwargs.get('http_method', None)
        self.timeout = kwargs.get('timeout', None)
        self.batch_size = kwargs.get('batch_size', None)
        self.degree_of_parallelism = kwargs.get('degree_of_parallelism', None)


class WordDelimiterTokenFilter(TokenFilter):
    """Splits words into subwords and applies optional transformations (Apache Lucene).

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. Token filter name: letters, digits, spaces, dashes or
        underscores only; must start and end alphanumeric; at most 128 characters.
    :type name: str
    :param generate_word_parts: Generate part words ("AzureSearch" -> "Azure"
        "Search"). Default is true.
    :type generate_word_parts: bool
    :param generate_number_parts: Generate number subwords. Default is true.
    :type generate_number_parts: bool
    :param catenate_words: Catenate maximum runs of word parts ("Azure-Search" ->
        "AzureSearch"). Default is false.
    :type catenate_words: bool
    :param catenate_numbers: Catenate maximum runs of number parts ("1-2" -> "12").
        Default is false.
    :type catenate_numbers: bool
    :param catenate_all: Catenate all subword parts ("Azure-Search-1" ->
        "AzureSearch1"). Default is false.
    :type catenate_all: bool
    :param split_on_case_change: Split on caseChange ("AzureSearch" -> "Azure"
        "Search"). Default is true.
    :type split_on_case_change: bool
    :param preserve_original: Also keep the original word in the subword list.
        Default is false.
    :type preserve_original: bool
    :param split_on_numerics: Split on numbers ("Azure1Search" -> "Azure" "1"
        "Search"). Default is true.
    :type split_on_numerics: bool
    :param stem_english_possessive: Remove trailing "'s" from each subword.
        Default is true.
    :type stem_english_possessive: bool
    :param protected_words: Tokens to protect from being delimited.
    :type protected_words: list[str]
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'generate_word_parts': {'key': 'generateWordParts', 'type': 'bool'},
        'generate_number_parts': {'key': 'generateNumberParts', 'type': 'bool'},
        'catenate_words': {'key': 'catenateWords', 'type': 'bool'},
        'catenate_numbers': {'key': 'catenateNumbers', 'type': 'bool'},
        'catenate_all': {'key': 'catenateAll', 'type': 'bool'},
        'split_on_case_change': {'key': 'splitOnCaseChange', 'type': 'bool'},
        'preserve_original': {'key': 'preserveOriginal', 'type': 'bool'},
        'split_on_numerics': {'key': 'splitOnNumerics', 'type': 'bool'},
        'stem_english_possessive': {'key': 'stemEnglishPossessive', 'type': 'bool'},
        'protected_words': {'key': 'protectedWords', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(WordDelimiterTokenFilter, self).__init__(**kwargs)
        self.odata_type = '#Microsoft.Azure.Search.WordDelimiterTokenFilter'
        self.generate_word_parts = kwargs.get('generate_word_parts', True)
        self.generate_number_parts = kwargs.get('generate_number_parts', True)
        self.catenate_words = kwargs.get('catenate_words', False)
        self.catenate_numbers = kwargs.get('catenate_numbers', False)
        self.catenate_all = kwargs.get('catenate_all', False)
        self.split_on_case_change = kwargs.get('split_on_case_change', True)
        self.preserve_original = kwargs.get('preserve_original', False)
        self.split_on_numerics = kwargs.get('split_on_numerics', True)
        self.stem_english_possessive = kwargs.get('stem_english_possessive', True)
        self.protected_words = kwargs.get('protected_words', None)
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

import datetime
from typing import Dict, List, Optional, Union

from azure.core.exceptions import HttpResponseError
import msrest.serialization


class AccessCondition(msrest.serialization.Model):
    """Parameter group carrying HTTP conditional-request headers.

    :param if_match: If-Match condition: perform the operation only if the ETag
        on the server matches this value.
    :type if_match: str
    :param if_none_match: If-None-Match condition: perform the operation only if
        the ETag on the server does not match this value.
    :type if_none_match: str
    """

    _attribute_map = {
        'if_match': {'key': 'If-Match', 'type': 'str'},
        'if_none_match': {'key': 'If-None-Match', 'type': 'str'},
    }

    def __init__(self, *, if_match: Optional[str] = None, if_none_match: Optional[str] = None, **kwargs):
        super(AccessCondition, self).__init__(**kwargs)
        self.if_match = if_match
        self.if_none_match = if_none_match


class Analyzer(msrest.serialization.Model):
    """Abstract base class for analyzers.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: CustomAnalyzer, PatternAnalyzer, StandardAnalyzer, StopAnalyzer.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. Analyzer name: letters, digits, spaces, dashes or
        underscores only; must start and end alphanumeric; at most 128 characters.
    :type name: str
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'odata_type': {
            '#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer',
            '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer',
            '#Microsoft.Azure.Search.StandardAnalyzer': 'StandardAnalyzer',
            '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer',
        }
    }

    def __init__(self, *, name: str, **kwargs):
        super(Analyzer, self).__init__(**kwargs)
        # Polymorphic discriminator; concrete sub-classes overwrite this.
        self.odata_type = None
        self.name = name


class AnalyzeRequest(msrest.serialization.Model):
    """Specifies some text and analysis components used to break that text into tokens.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. The text to break into tokens.
    :type text: str
    :param analyzer: Name of the analyzer to use. If not specified, a tokenizer
        must be specified instead; analyzer and tokenizer are mutually exclusive
        (e.g. 'standard.lucene', 'en.microsoft', 'keyword', 'pattern', 'simple',
        'stop', 'whitespace', or any language analyzer).
    :type analyzer: str or ~search_service_client.models.AnalyzerName
    :param tokenizer: Name of the tokenizer to use. If not specified, an analyzer
        must be specified instead; analyzer and tokenizer are mutually exclusive
        (e.g. 'classic', 'edgeNGram', 'keyword_v2', 'letter', 'lowercase',
        'nGram', 'pattern', 'standard_v2', 'uax_url_email', 'whitespace').
    :type tokenizer: str or ~search_service_client.models.TokenizerName
    :param token_filters: Optional token filters to apply; only valid together
        with the tokenizer parameter.
    :type token_filters: list[str or ~search_service_client.models.TokenFilterName]
    :param char_filters: Optional character filters to apply; only valid together
        with the tokenizer parameter.
    :type char_filters: list[str]
    """

    _validation = {
        'text': {'required': True},
    }

    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'analyzer': {'key': 'analyzer', 'type': 'str'},
        'tokenizer': {'key': 'tokenizer', 'type': 'str'},
        'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
        'char_filters': {'key': 'charFilters', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        text: str,
        analyzer: Optional[Union[str, "AnalyzerName"]] = None,
        tokenizer: Optional[Union[str, "TokenizerName"]] = None,
        token_filters: Optional[List[Union[str, "TokenFilterName"]]] = None,
        char_filters: Optional[List[str]] = None,
        **kwargs
    ):
        super(AnalyzeRequest, self).__init__(**kwargs)
        self.text = text
        self.analyzer = analyzer
        self.tokenizer = tokenizer
        self.token_filters = token_filters
        self.char_filters = char_filters


class AnalyzeResult(msrest.serialization.Model):
    """The result of testing an analyzer on text.

    All required parameters must be populated in order to send to Azure.

    :param tokens: Required. Tokens returned by the analyzer specified in the request.
    :type tokens: list[~search_service_client.models.TokenInfo]
    """

    _validation = {
        'tokens': {'required': True},
    }

    _attribute_map = {
        'tokens': {'key': 'tokens', 'type': '[TokenInfo]'},
    }

    def __init__(self, *, tokens: List["TokenInfo"], **kwargs):
        super(AnalyzeResult, self).__init__(**kwargs)
        self.tokens = tokens


class TokenFilter(msrest.serialization.Model):
    """Abstract base class for token filters.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are listed in ``_subtype_map`` below.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. Token filter name: letters, digits, spaces, dashes or
        underscores only; must start and end alphanumeric; at most 128 characters.
    :type name: str
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'odata_type': {
            '#Microsoft.Azure.Search.AsciiFoldingTokenFilter': 'AsciiFoldingTokenFilter',
            '#Microsoft.Azure.Search.CjkBigramTokenFilter': 'CjkBigramTokenFilter',
            '#Microsoft.Azure.Search.CommonGramTokenFilter': 'CommonGramTokenFilter',
            '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter': 'DictionaryDecompounderTokenFilter',
            '#Microsoft.Azure.Search.EdgeNGramTokenFilter': 'EdgeNGramTokenFilter',
            '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2': 'EdgeNGramTokenFilterV2',
            '#Microsoft.Azure.Search.ElisionTokenFilter': 'ElisionTokenFilter',
            '#Microsoft.Azure.Search.KeepTokenFilter': 'KeepTokenFilter',
            '#Microsoft.Azure.Search.KeywordMarkerTokenFilter': 'KeywordMarkerTokenFilter',
            '#Microsoft.Azure.Search.LengthTokenFilter': 'LengthTokenFilter',
            '#Microsoft.Azure.Search.LimitTokenFilter': 'LimitTokenFilter',
            '#Microsoft.Azure.Search.NGramTokenFilter': 'NGramTokenFilter',
            '#Microsoft.Azure.Search.NGramTokenFilterV2': 'NGramTokenFilterV2',
            '#Microsoft.Azure.Search.PatternCaptureTokenFilter': 'PatternCaptureTokenFilter',
            '#Microsoft.Azure.Search.PatternReplaceTokenFilter': 'PatternReplaceTokenFilter',
            '#Microsoft.Azure.Search.PhoneticTokenFilter': 'PhoneticTokenFilter',
            '#Microsoft.Azure.Search.ShingleTokenFilter': 'ShingleTokenFilter',
            '#Microsoft.Azure.Search.SnowballTokenFilter': 'SnowballTokenFilter',
            '#Microsoft.Azure.Search.StemmerOverrideTokenFilter': 'StemmerOverrideTokenFilter',
            '#Microsoft.Azure.Search.StemmerTokenFilter': 'StemmerTokenFilter',
            '#Microsoft.Azure.Search.StopwordsTokenFilter': 'StopwordsTokenFilter',
            '#Microsoft.Azure.Search.SynonymTokenFilter': 'SynonymTokenFilter',
            '#Microsoft.Azure.Search.TruncateTokenFilter': 'TruncateTokenFilter',
            '#Microsoft.Azure.Search.UniqueTokenFilter': 'UniqueTokenFilter',
            '#Microsoft.Azure.Search.WordDelimiterTokenFilter': 'WordDelimiterTokenFilter',
        }
    }

    def __init__(self, *, name: str, **kwargs):
        super(TokenFilter, self).__init__(**kwargs)
        # Polymorphic discriminator; concrete sub-classes overwrite this.
        self.odata_type = None
        self.name = name


class AsciiFoldingTokenFilter(TokenFilter):
    """Converts Unicode characters outside the first 127 ASCII characters to their ASCII equivalents, where such equivalents exist (Apache Lucene).

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. Token filter name: letters, digits, spaces, dashes or
        underscores only; must start and end alphanumeric; at most 128 characters.
    :type name: str
    :param preserve_original: Whether the original token is kept. Default is false.
    :type preserve_original: bool
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'preserve_original': {'key': 'preserveOriginal', 'type': 'bool'},
    }

    def __init__(self, *, name: str, preserve_original: Optional[bool] = False, **kwargs):
        super(AsciiFoldingTokenFilter, self).__init__(name=name, **kwargs)
        self.odata_type = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter'
        self.preserve_original = preserve_original


class AzureActiveDirectoryApplicationCredentials(msrest.serialization.Model):
    """Credentials of a registered application used for authenticated access to the encryption keys stored in Azure Key Vault.

    All required parameters must be populated in order to send to Azure.

    :param application_id: Required. AAD Application ID granted access to the
        Azure Key Vault used for encryption at rest (not the AAD Object ID).
    :type application_id: str
    :param application_secret: Authentication key of the AAD application.
    :type application_secret: str
    """

    _validation = {
        'application_id': {'required': True},
    }

    _attribute_map = {
        'application_id': {'key': 'applicationId', 'type': 'str'},
        'application_secret': {'key': 'applicationSecret', 'type': 'str'},
    }

    def __init__(self, *, application_id: str, application_secret: Optional[str] = None, **kwargs):
        super(AzureActiveDirectoryApplicationCredentials, self).__init__(**kwargs)
        self.application_id = application_id
        self.application_secret = application_secret


class CharFilter(msrest.serialization.Model):
    """Abstract base class for character filters.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: MappingCharFilter, PatternReplaceCharFilter.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. Char filter name: letters, digits, spaces, dashes or
        underscores only; must start and end alphanumeric; at most 128 characters.
    :type name: str
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'odata_type': {
            '#Microsoft.Azure.Search.MappingCharFilter': 'MappingCharFilter',
            '#Microsoft.Azure.Search.PatternReplaceCharFilter': 'PatternReplaceCharFilter',
        }
    }

    def __init__(self, *, name: str, **kwargs):
        super(CharFilter, self).__init__(**kwargs)
        # Polymorphic discriminator; concrete sub-classes overwrite this.
        self.odata_type = None
        self.name = name
+ :type output_unigrams: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'ignore_scripts': {'key': 'ignoreScripts', 'type': '[str]'}, + 'output_unigrams': {'key': 'outputUnigrams', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + ignore_scripts: Optional[List[Union[str, "CjkBigramTokenFilterScripts"]]] = None, + output_unigrams: Optional[bool] = False, + **kwargs + ): + super(CjkBigramTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.CjkBigramTokenFilter' + self.ignore_scripts = ignore_scripts + self.output_unigrams = output_unigrams + + +class Tokenizer(msrest.serialization.Model): + """Abstract base class for tokenizers. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, StandardTokenizer, StandardTokenizerV2, UaxUrlEmailTokenizer. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. 
+ :type name: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'StandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'StandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'} + } + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(Tokenizer, self).__init__(**kwargs) + self.odata_type = None + self.name = name + + +class ClassicTokenizer(Tokenizer): + """Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. 
The maximum token length that can be used is 300 characters. + :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 255, + **kwargs + ): + super(ClassicTokenizer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.ClassicTokenizer' + self.max_token_length = max_token_length + + +class CognitiveServicesAccount(msrest.serialization.Model): + """Abstract base class for describing any cognitive service resource attached to the skillset. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: CognitiveServicesAccountKey, DefaultCognitiveServicesAccount. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param description: + :type description: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.CognitiveServicesByKey': 'CognitiveServicesAccountKey', '#Microsoft.Azure.Search.DefaultCognitiveServices': 'DefaultCognitiveServicesAccount'} + } + + def __init__( + self, + *, + description: Optional[str] = None, + **kwargs + ): + super(CognitiveServicesAccount, self).__init__(**kwargs) + self.odata_type = None + self.description = description + + +class CognitiveServicesAccountKey(CognitiveServicesAccount): + """A cognitive service resource provisioned with a key that is attached to a skillset. 
+ + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param description: + :type description: str + :param key: Required. + :type key: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'key': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'str'}, + } + + def __init__( + self, + *, + key: str, + description: Optional[str] = None, + **kwargs + ): + super(CognitiveServicesAccountKey, self).__init__(description=description, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' + self.key = key + + +class CommonGramTokenFilter(TokenFilter): + """Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param common_words: Required. The set of common words. + :type common_words: list[str] + :param ignore_case: A value indicating whether common words matching will be case insensitive. + Default is false. + :type ignore_case: bool + :param use_query_mode: A value that indicates whether the token filter is in query mode. When + in query mode, the token filter generates bigrams and then removes common words and single + terms followed by a common word. Default is false. 
+ :type use_query_mode: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'common_words': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'common_words': {'key': 'commonWords', 'type': '[str]'}, + 'ignore_case': {'key': 'ignoreCase', 'type': 'bool'}, + 'use_query_mode': {'key': 'queryMode', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + common_words: List[str], + ignore_case: Optional[bool] = False, + use_query_mode: Optional[bool] = False, + **kwargs + ): + super(CommonGramTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' + self.common_words = common_words + self.ignore_case = ignore_case + self.use_query_mode = use_query_mode + + +class Skill(msrest.serialization.Model): + """Abstract base class for skills. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: WebApiSkill, EntityRecognitionSkill, KeyPhraseExtractionSkill, LanguageDetectionSkill, MergeSkill, SentimentSkill, SplitSkill, TextTranslationSkill, ConditionalSkill, ShaperSkill, ImageAnalysisSkill, OcrSkill. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. 
+ :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Skills.Custom.WebApiSkill': 'WebApiSkill', '#Microsoft.Skills.Text.EntityRecognitionSkill': 'EntityRecognitionSkill', '#Microsoft.Skills.Text.KeyPhraseExtractionSkill': 'KeyPhraseExtractionSkill', '#Microsoft.Skills.Text.LanguageDetectionSkill': 'LanguageDetectionSkill', '#Microsoft.Skills.Text.MergeSkill': 'MergeSkill', '#Microsoft.Skills.Text.SentimentSkill': 'SentimentSkill', '#Microsoft.Skills.Text.SplitSkill': 'SplitSkill', '#Microsoft.Skills.Text.TranslationSkill': 'TextTranslationSkill', '#Microsoft.Skills.Util.ConditionalSkill': 'ConditionalSkill', '#Microsoft.Skills.Util.ShaperSkill': 'ShaperSkill', '#Microsoft.Skills.Vision.ImageAnalysisSkill': 'ImageAnalysisSkill', '#Microsoft.Skills.Vision.OcrSkill': 'OcrSkill'} + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + **kwargs + ): + super(Skill, 
self).__init__(**kwargs) + self.odata_type = None + self.name = name + self.description = description + self.context = context + self.inputs = inputs + self.outputs = outputs + + +class ConditionalSkill(Skill): + """A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + **kwargs + ): + super(ConditionalSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type = '#Microsoft.Skills.Util.ConditionalSkill' + + +class CorsOptions(msrest.serialization.Model): + """Defines options to control Cross-Origin Resource Sharing (CORS) for an index. + + All required parameters must be populated in order to send to Azure. + + :param allowed_origins: Required. The list of origins from which JavaScript code will be + granted access to your index. Can contain a list of hosts of the form {protocol}://{fully- + qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). + :type allowed_origins: list[str] + :param max_age_in_seconds: The duration for which browsers should cache CORS preflight + responses. Defaults to 5 minutes. 
+ :type max_age_in_seconds: long + """ + + _validation = { + 'allowed_origins': {'required': True}, + } + + _attribute_map = { + 'allowed_origins': {'key': 'allowedOrigins', 'type': '[str]'}, + 'max_age_in_seconds': {'key': 'maxAgeInSeconds', 'type': 'long'}, + } + + def __init__( + self, + *, + allowed_origins: List[str], + max_age_in_seconds: Optional[int] = None, + **kwargs + ): + super(CorsOptions, self).__init__(**kwargs) + self.allowed_origins = allowed_origins + self.max_age_in_seconds = max_age_in_seconds + + +class CustomAnalyzer(Analyzer): + """Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param tokenizer: Required. The name of the tokenizer to use to divide continuous text into a + sequence of tokens, such as breaking a sentence into words. Possible values include: 'classic', + 'edgeNGram', 'keyword_v2', 'letter', 'lowercase', 'microsoft_language_tokenizer', + 'microsoft_language_stemming_tokenizer', 'nGram', 'path_hierarchy_v2', 'pattern', + 'standard_v2', 'uax_url_email', 'whitespace'. + :type tokenizer: str or ~search_service_client.models.TokenizerName + :param token_filters: A list of token filters used to filter out or modify the tokens generated + by a tokenizer. For example, you can specify a lowercase filter that converts all characters to + lowercase. 
The filters are run in the order in which they are listed. + :type token_filters: list[str or ~search_service_client.models.TokenFilterName] + :param char_filters: A list of character filters used to prepare input text before it is + processed by the tokenizer. For instance, they can replace certain characters or symbols. The + filters are run in the order in which they are listed. + :type char_filters: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'tokenizer': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'tokenizer': {'key': 'tokenizer', 'type': 'str'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[str]'}, + 'char_filters': {'key': 'charFilters', 'type': '[str]'}, + } + + def __init__( + self, + *, + name: str, + tokenizer: Union[str, "TokenizerName"], + token_filters: Optional[List[Union[str, "TokenFilterName"]]] = None, + char_filters: Optional[List[str]] = None, + **kwargs + ): + super(CustomAnalyzer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' + self.tokenizer = tokenizer + self.token_filters = token_filters + self.char_filters = char_filters + + +class DataChangeDetectionPolicy(msrest.serialization.Model): + """Abstract base class for data change detection policies. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: HighWaterMarkChangeDetectionPolicy, SqlIntegratedChangeTrackingPolicy. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. 
+ :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy': 'HighWaterMarkChangeDetectionPolicy', '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy': 'SqlIntegratedChangeTrackingPolicy'} + } + + def __init__( + self, + **kwargs + ): + super(DataChangeDetectionPolicy, self).__init__(**kwargs) + self.odata_type = None + + +class DataContainer(msrest.serialization.Model): + """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the table or view (for Azure SQL data source) or collection + (for CosmosDB data source) that will be indexed. + :type name: str + :param query: A query that is applied to this data container. The syntax and meaning of this + parameter is datasource-specific. Not supported by Azure SQL datasources. + :type query: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'query': {'key': 'query', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + query: Optional[str] = None, + **kwargs + ): + super(DataContainer, self).__init__(**kwargs) + self.name = name + self.query = query + + +class DataDeletionDetectionPolicy(msrest.serialization.Model): + """Abstract base class for data deletion detection policies. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: SoftDeleteColumnDeletionDetectionPolicy. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. 
+ :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy': 'SoftDeleteColumnDeletionDetectionPolicy'} + } + + def __init__( + self, + **kwargs + ): + super(DataDeletionDetectionPolicy, self).__init__(**kwargs) + self.odata_type = None + + +class DataSource(msrest.serialization.Model): + """Represents a datasource definition, which can be used to configure an indexer. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the datasource. + :type name: str + :param description: The description of the datasource. + :type description: str + :param type: Required. The type of the datasource. Possible values include: 'azuresql', + 'cosmosdb', 'azureblob', 'azuretable', 'mysql'. + :type type: str or ~search_service_client.models.DataSourceType + :param credentials: Required. Credentials for the datasource. + :type credentials: ~search_service_client.models.DataSourceCredentials + :param container: Required. The data container for the datasource. + :type container: ~search_service_client.models.DataContainer + :param data_change_detection_policy: The data change detection policy for the datasource. + :type data_change_detection_policy: ~search_service_client.models.DataChangeDetectionPolicy + :param data_deletion_detection_policy: The data deletion detection policy for the datasource. + :type data_deletion_detection_policy: ~search_service_client.models.DataDeletionDetectionPolicy + :param e_tag: The ETag of the DataSource. 
+ :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + 'credentials': {'required': True}, + 'container': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'}, + 'container': {'key': 'container', 'type': 'DataContainer'}, + 'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'}, + 'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + type: Union[str, "DataSourceType"], + credentials: "DataSourceCredentials", + container: "DataContainer", + description: Optional[str] = None, + data_change_detection_policy: Optional["DataChangeDetectionPolicy"] = None, + data_deletion_detection_policy: Optional["DataDeletionDetectionPolicy"] = None, + e_tag: Optional[str] = None, + **kwargs + ): + super(DataSource, self).__init__(**kwargs) + self.name = name + self.description = description + self.type = type + self.credentials = credentials + self.container = container + self.data_change_detection_policy = data_change_detection_policy + self.data_deletion_detection_policy = data_deletion_detection_policy + self.e_tag = e_tag + + +class DataSourceCredentials(msrest.serialization.Model): + """Represents credentials that can be used to connect to a datasource. + + :param connection_string: The connection string for the datasource. 
+ :type connection_string: str + """ + + _attribute_map = { + 'connection_string': {'key': 'connectionString', 'type': 'str'}, + } + + def __init__( + self, + *, + connection_string: Optional[str] = None, + **kwargs + ): + super(DataSourceCredentials, self).__init__(**kwargs) + self.connection_string = connection_string + + +class DefaultCognitiveServicesAccount(CognitiveServicesAccount): + """An empty object that represents the default cognitive service resource for a skillset. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param description: + :type description: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + **kwargs + ): + super(DefaultCognitiveServicesAccount, self).__init__(description=description, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.DefaultCognitiveServices' + + +class DictionaryDecompounderTokenFilter(TokenFilter): + """Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param word_list: Required. The list of words to match against. + :type word_list: list[str] + :param min_word_size: The minimum word size. Only words longer than this get processed. Default + is 5. Maximum is 300. 
+ :type min_word_size: int + :param min_subword_size: The minimum subword size. Only subwords longer than this are + outputted. Default is 2. Maximum is 300. + :type min_subword_size: int + :param max_subword_size: The maximum subword size. Only subwords shorter than this are + outputted. Default is 15. Maximum is 300. + :type max_subword_size: int + :param only_longest_match: A value indicating whether to add only the longest matching subword + to the output. Default is false. + :type only_longest_match: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'word_list': {'required': True}, + 'min_word_size': {'maximum': 300}, + 'min_subword_size': {'maximum': 300}, + 'max_subword_size': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'word_list': {'key': 'wordList', 'type': '[str]'}, + 'min_word_size': {'key': 'minWordSize', 'type': 'int'}, + 'min_subword_size': {'key': 'minSubwordSize', 'type': 'int'}, + 'max_subword_size': {'key': 'maxSubwordSize', 'type': 'int'}, + 'only_longest_match': {'key': 'onlyLongestMatch', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + word_list: List[str], + min_word_size: Optional[int] = 5, + min_subword_size: Optional[int] = 2, + max_subword_size: Optional[int] = 15, + only_longest_match: Optional[bool] = False, + **kwargs + ): + super(DictionaryDecompounderTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' + self.word_list = word_list + self.min_word_size = min_word_size + self.min_subword_size = min_subword_size + self.max_subword_size = max_subword_size + self.only_longest_match = only_longest_match + + +class ScoringFunction(msrest.serialization.Model): + """Abstract base class for functions that can modify document scores during ranking. 
+ + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: DistanceScoringFunction, FreshnessScoringFunction, MagnitudeScoringFunction, TagScoringFunction. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. + :type type: str + :param field_name: Required. The name of the field used as input to the scoring function. + :type field_name: str + :param boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :type boost: float + :param interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', + 'logarithmic'. + :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + """ + + _validation = { + 'type': {'required': True}, + 'field_name': {'required': True}, + 'boost': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'field_name': {'key': 'fieldName', 'type': 'str'}, + 'boost': {'key': 'boost', 'type': 'float'}, + 'interpolation': {'key': 'interpolation', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'distance': 'DistanceScoringFunction', 'freshness': 'FreshnessScoringFunction', 'magnitude': 'MagnitudeScoringFunction', 'tag': 'TagScoringFunction'} + } + + def __init__( + self, + *, + field_name: str, + boost: float, + interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None, + **kwargs + ): + super(ScoringFunction, self).__init__(**kwargs) + self.type = None + self.field_name = field_name + self.boost = boost + self.interpolation = interpolation + + +class DistanceScoringFunction(ScoringFunction): + """Defines a function that boosts scores based on distance from a geographic location. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. 
Constant filled by server. + :type type: str + :param field_name: Required. The name of the field used as input to the scoring function. + :type field_name: str + :param boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :type boost: float + :param interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', + 'logarithmic'. + :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + :param parameters: Required. Parameter values for the distance scoring function. + :type parameters: ~search_service_client.models.DistanceScoringParameters + """ + + _validation = { + 'type': {'required': True}, + 'field_name': {'required': True}, + 'boost': {'required': True}, + 'parameters': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'field_name': {'key': 'fieldName', 'type': 'str'}, + 'boost': {'key': 'boost', 'type': 'float'}, + 'interpolation': {'key': 'interpolation', 'type': 'str'}, + 'parameters': {'key': 'distance', 'type': 'DistanceScoringParameters'}, + } + + def __init__( + self, + *, + field_name: str, + boost: float, + parameters: "DistanceScoringParameters", + interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None, + **kwargs + ): + super(DistanceScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) + self.type = 'distance' + self.parameters = parameters + + +class DistanceScoringParameters(msrest.serialization.Model): + """Provides parameter values to a distance scoring function. + + All required parameters must be populated in order to send to Azure. + + :param reference_point_parameter: Required. The name of the parameter passed in search queries + to specify the reference location. + :type reference_point_parameter: str + :param boosting_distance: Required. 
The distance in kilometers from the reference location + where the boosting range ends. + :type boosting_distance: float + """ + + _validation = { + 'reference_point_parameter': {'required': True}, + 'boosting_distance': {'required': True}, + } + + _attribute_map = { + 'reference_point_parameter': {'key': 'referencePointParameter', 'type': 'str'}, + 'boosting_distance': {'key': 'boostingDistance', 'type': 'float'}, + } + + def __init__( + self, + *, + reference_point_parameter: str, + boosting_distance: float, + **kwargs + ): + super(DistanceScoringParameters, self).__init__(**kwargs) + self.reference_point_parameter = reference_point_parameter + self.boosting_distance = boosting_distance + + +class EdgeNGramTokenFilter(TokenFilter): + """Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. + :type max_gram: int + :param side: Specifies which side of the input the n-gram should be generated from. Default is + "front". Possible values include: 'front', 'back'. 
+ :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + 'side': {'key': 'side', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = 1, + max_gram: Optional[int] = 2, + side: Optional[Union[str, "EdgeNGramTokenFilterSide"]] = None, + **kwargs + ): + super(EdgeNGramTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' + self.min_gram = min_gram + self.max_gram = max_gram + self.side = side + + +class EdgeNGramTokenFilterV2(TokenFilter): + """Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :type max_gram: int + :param side: Specifies which side of the input the n-gram should be generated from. Default is + "front". Possible values include: 'front', 'back'. 
+ :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'min_gram': {'maximum': 300}, + 'max_gram': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + 'side': {'key': 'side', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = 1, + max_gram: Optional[int] = 2, + side: Optional[Union[str, "EdgeNGramTokenFilterSide"]] = None, + **kwargs + ): + super(EdgeNGramTokenFilterV2, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' + self.min_gram = min_gram + self.max_gram = max_gram + self.side = side + + +class EdgeNGramTokenizer(Tokenizer): + """Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :type max_gram: int + :param token_chars: Character classes to keep in the tokens. 
+ :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'min_gram': {'maximum': 300}, + 'max_gram': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + 'token_chars': {'key': 'tokenChars', 'type': '[str]'}, + } + + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = 1, + max_gram: Optional[int] = 2, + token_chars: Optional[List[Union[str, "TokenCharacterKind"]]] = None, + **kwargs + ): + super(EdgeNGramTokenizer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenizer' + self.min_gram = min_gram + self.max_gram = max_gram + self.token_chars = token_chars + + +class ElisionTokenFilter(TokenFilter): + """Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param articles: The set of articles to remove. 
+ :type articles: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'articles': {'key': 'articles', 'type': '[str]'}, + } + + def __init__( + self, + *, + name: str, + articles: Optional[List[str]] = None, + **kwargs + ): + super(ElisionTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.ElisionTokenFilter' + self.articles = articles + + +class EncryptionKey(msrest.serialization.Model): + """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. + + All required parameters must be populated in order to send to Azure. + + :param key_vault_key_name: Required. The name of your Azure Key Vault key to be used to encrypt + your data at rest. + :type key_vault_key_name: str + :param key_vault_key_version: Required. The version of your Azure Key Vault key to be used to + encrypt your data at rest. + :type key_vault_key_version: str + :param key_vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, + that contains the key to be used to encrypt your data at rest. An example URI might be + https://my-keyvault-name.vault.azure.net. + :type key_vault_uri: str + :param access_credentials: Optional Azure Active Directory credentials used for accessing your + Azure Key Vault. Not required if using managed identity instead. 
+ :type access_credentials: + ~search_service_client.models.AzureActiveDirectoryApplicationCredentials + """ + + _validation = { + 'key_vault_key_name': {'required': True}, + 'key_vault_key_version': {'required': True}, + 'key_vault_uri': {'required': True}, + } + + _attribute_map = { + 'key_vault_key_name': {'key': 'keyVaultKeyName', 'type': 'str'}, + 'key_vault_key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'}, + 'key_vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, + 'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'}, + } + + def __init__( + self, + *, + key_vault_key_name: str, + key_vault_key_version: str, + key_vault_uri: str, + access_credentials: Optional["AzureActiveDirectoryApplicationCredentials"] = None, + **kwargs + ): + super(EncryptionKey, self).__init__(**kwargs) + self.key_vault_key_name = key_vault_key_name + self.key_vault_key_version = key_vault_key_version + self.key_vault_uri = key_vault_uri + self.access_credentials = access_credentials + + +class EntityRecognitionSkill(Skill): + """Text analytics entity recognition. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. 
+ :type inputs: list[~search_service_client.models.InputFieldMappingEntry]
+ :param outputs: Required. The output of a skill is either a field in a search index, or a value
+ that can be consumed as an input by another skill.
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry]
+ :param categories: A list of entity categories that should be extracted.
+ :type categories: list[str or ~search_service_client.models.EntityCategory]
+ :param default_language_code: A value indicating which language code to use. Default is en.
+ Possible values include: 'ar', 'cs', 'zh-Hans', 'zh-Hant', 'da', 'nl', 'en', 'fi', 'fr', 'de',
+ 'el', 'hu', 'it', 'ja', 'ko', 'no', 'pl', 'pt-PT', 'pt-BR', 'ru', 'es', 'sv', 'tr'.
+ :type default_language_code: str or
+ ~search_service_client.models.EntityRecognitionSkillLanguage
+ :param include_typeless_entities: Determines whether or not to include entities which are well
+ known but don't conform to a pre-defined type. If this configuration is not set (default), is
+ set to null, or is set to false, entities which don't conform to one of the pre-defined types
+ will not be surfaced.
+ :type include_typeless_entities: bool
+ :param minimum_precision: A value between 0 and 1 that can be used to include only entities
+ whose confidence score is greater than the value specified. If not set (default), or if
+ explicitly set to null, all entities will be included.
+ :type minimum_precision: float + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'categories': {'key': 'categories', 'type': '[str]'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + 'include_typeless_entities': {'key': 'includeTypelessEntities', 'type': 'bool'}, + 'minimum_precision': {'key': 'minimumPrecision', 'type': 'float'}, + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + categories: Optional[List[Union[str, "EntityCategory"]]] = None, + default_language_code: Optional[Union[str, "EntityRecognitionSkillLanguage"]] = None, + include_typeless_entities: Optional[bool] = None, + minimum_precision: Optional[float] = None, + **kwargs + ): + super(EntityRecognitionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type = '#Microsoft.Skills.Text.EntityRecognitionSkill' + self.categories = categories + self.default_language_code = default_language_code + self.include_typeless_entities = include_typeless_entities + self.minimum_precision = minimum_precision + + +class Field(msrest.serialization.Model): + """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. 
The name of the field, which must be unique within the fields collection + of the index or parent field. + :type name: str + :param type: Required. The data type of the field. Possible values include: 'Edm.String', + 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', + 'Edm.GeographyPoint', 'Edm.ComplexType'. + :type type: str or ~search_service_client.models.DataType + :param key: A value indicating whether the field uniquely identifies documents in the index. + Exactly one top-level field in each index must be chosen as the key field and it must be of + type Edm.String. Key fields can be used to look up documents directly and update or delete + specific documents. Default is false for simple fields and null for complex fields. + :type key: bool + :param retrievable: A value indicating whether the field can be returned in a search result. + You can disable this option if you want to use a field (for example, margin) as a filter, + sorting, or scoring mechanism but do not want the field to be visible to the end user. This + property must be true for key fields, and it must be null for complex fields. This property can + be changed on existing fields. Enabling this property does not cause any increase in index + storage requirements. Default is true for simple fields and null for complex fields. + :type retrievable: bool + :param searchable: A value indicating whether the field is full-text searchable. This means it + will undergo analysis such as word-breaking during indexing. If you set a searchable field to a + value like "sunny day", internally it will be split into the individual tokens "sunny" and + "day". This enables full-text searches for these terms. Fields of type Edm.String or + Collection(Edm.String) are searchable by default. This property must be false for simple fields + of other non-string data types, and it must be null for complex fields. 
Note: searchable fields + consume extra space in your index since Azure Cognitive Search will store an additional + tokenized version of the field value for full-text searches. If you want to save space in your + index and you don't need a field to be included in searches, set searchable to false. + :type searchable: bool + :param filterable: A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields of type + Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so + comparisons are for exact matches only. For example, if you set such a field f to "sunny day", + $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property + must be null for complex fields. Default is true for simple fields and null for complex fields. + :type filterable: bool + :param sortable: A value indicating whether to enable the field to be referenced in $orderby + expressions. By default Azure Cognitive Search sorts results by score, but in many experiences + users will want to sort by fields in the documents. A simple field can be sortable only if it + is single-valued (it has a single value in the scope of the parent document). Simple collection + fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex + collections are also multi-valued, and therefore cannot be sortable. This is true whether it's + an immediate parent field, or an ancestor field, that's the complex collection. Complex fields + cannot be sortable and the sortable property must be null for such fields. The default for + sortable is true for single-valued simple fields, false for multi-valued simple fields, and + null for complex fields. + :type sortable: bool + :param facetable: A value indicating whether to enable the field to be referenced in facet + queries. 
Typically used in a presentation of search results that includes hit count by category + (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so + on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple + fields. + :type facetable: bool + :param analyzer: The name of the language analyzer to use for the field. This option can be + used only with searchable fields and it can't be set together with either searchAnalyzer or + indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null + for complex fields. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', + 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- + Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', + 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', + 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', + 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', + 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', + 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', + 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', + 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', + 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- + PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', + 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', + 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', + 'th.microsoft', 
'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', + 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', + 'simple', 'stop', 'whitespace'. + :type analyzer: str or ~search_service_client.models.AnalyzerName + :param search_analyzer: The name of the analyzer used at search time for the field. This option + can be used only with searchable fields. It must be set together with indexAnalyzer and it + cannot be set together with the analyzer option. This analyzer can be updated on an existing + field. Must be null for complex fields. Possible values include: 'ar.microsoft', 'ar.lucene', + 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', + 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', + 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', + 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', + 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', + 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', + 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', + 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', + 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', + 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt- + BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', + 'ru.microsoft', 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', + 'sl.microsoft', 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', + 'te.microsoft', 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', + 'ur.microsoft', 'vi.microsoft', 'standard.lucene', 
'standardasciifolding.lucene', 'keyword', + 'pattern', 'simple', 'stop', 'whitespace'. + :type search_analyzer: str or ~search_service_client.models.AnalyzerName + :param index_analyzer: The name of the analyzer used at indexing time for the field. This + option can be used only with searchable fields. It must be set together with searchAnalyzer and + it cannot be set together with the analyzer option. Once the analyzer is chosen, it cannot be + changed for the field. Must be null for complex fields. Possible values include: + 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', + 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh- + Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', + 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', + 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', + 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', + 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', + 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', + 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', + 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', + 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', + 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- + cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', + 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', + 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', + 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', + 'whitespace'. 
+ :type index_analyzer: str or ~search_service_client.models.AnalyzerName + :param synonym_maps: A list of the names of synonym maps to associate with this field. This + option can be used only with searchable fields. Currently only one synonym map per field is + supported. Assigning a synonym map to a field ensures that query terms targeting that field are + expanded at query-time using the rules in the synonym map. This attribute can be changed on + existing fields. Must be null or an empty collection for complex fields. + :type synonym_maps: list[str] + :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). Must be null or empty for simple fields. + :type fields: list[~search_service_client.models.Field] + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'bool'}, + 'retrievable': {'key': 'retrievable', 'type': 'bool'}, + 'searchable': {'key': 'searchable', 'type': 'bool'}, + 'filterable': {'key': 'filterable', 'type': 'bool'}, + 'sortable': {'key': 'sortable', 'type': 'bool'}, + 'facetable': {'key': 'facetable', 'type': 'bool'}, + 'analyzer': {'key': 'analyzer', 'type': 'str'}, + 'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'}, + 'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'}, + 'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'}, + 'fields': {'key': 'fields', 'type': '[Field]'}, + } + + def __init__( + self, + *, + name: str, + type: Union[str, "DataType"], + key: Optional[bool] = None, + retrievable: Optional[bool] = None, + searchable: Optional[bool] = None, + filterable: Optional[bool] = None, + sortable: Optional[bool] = None, + facetable: Optional[bool] = None, + analyzer: Optional[Union[str, "AnalyzerName"]] = None, + search_analyzer: Optional[Union[str, "AnalyzerName"]] = None, + index_analyzer: 
Optional[Union[str, "AnalyzerName"]] = None, + synonym_maps: Optional[List[str]] = None, + fields: Optional[List["Field"]] = None, + **kwargs + ): + super(Field, self).__init__(**kwargs) + self.name = name + self.type = type + self.key = key + self.retrievable = retrievable + self.searchable = searchable + self.filterable = filterable + self.sortable = sortable + self.facetable = facetable + self.analyzer = analyzer + self.search_analyzer = search_analyzer + self.index_analyzer = index_analyzer + self.synonym_maps = synonym_maps + self.fields = fields + + +class FieldMapping(msrest.serialization.Model): + """Defines a mapping between a field in a data source and a target field in an index. + + All required parameters must be populated in order to send to Azure. + + :param source_field_name: Required. The name of the field in the data source. + :type source_field_name: str + :param target_field_name: The name of the target field in the index. Same as the source field + name by default. + :type target_field_name: str + :param mapping_function: A function to apply to each source field value before indexing. 
+ :type mapping_function: ~search_service_client.models.FieldMappingFunction + """ + + _validation = { + 'source_field_name': {'required': True}, + } + + _attribute_map = { + 'source_field_name': {'key': 'sourceFieldName', 'type': 'str'}, + 'target_field_name': {'key': 'targetFieldName', 'type': 'str'}, + 'mapping_function': {'key': 'mappingFunction', 'type': 'FieldMappingFunction'}, + } + + def __init__( + self, + *, + source_field_name: str, + target_field_name: Optional[str] = None, + mapping_function: Optional["FieldMappingFunction"] = None, + **kwargs + ): + super(FieldMapping, self).__init__(**kwargs) + self.source_field_name = source_field_name + self.target_field_name = target_field_name + self.mapping_function = mapping_function + + +class FieldMappingFunction(msrest.serialization.Model): + """Represents a function that transforms a value from a data source before indexing. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the field mapping function. + :type name: str + :param parameters: A dictionary of parameter name/value pairs to pass to the function. Each + value must be of a primitive type. + :type parameters: dict[str, object] + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '{object}'}, + } + + def __init__( + self, + *, + name: str, + parameters: Optional[Dict[str, object]] = None, + **kwargs + ): + super(FieldMappingFunction, self).__init__(**kwargs) + self.name = name + self.parameters = parameters + + +class FreshnessScoringFunction(ScoringFunction): + """Defines a function that boosts scores based on the value of a date-time field. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. + :type type: str + :param field_name: Required. 
The name of the field used as input to the scoring function. + :type field_name: str + :param boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :type boost: float + :param interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', + 'logarithmic'. + :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + :param parameters: Required. Parameter values for the freshness scoring function. + :type parameters: ~search_service_client.models.FreshnessScoringParameters + """ + + _validation = { + 'type': {'required': True}, + 'field_name': {'required': True}, + 'boost': {'required': True}, + 'parameters': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'field_name': {'key': 'fieldName', 'type': 'str'}, + 'boost': {'key': 'boost', 'type': 'float'}, + 'interpolation': {'key': 'interpolation', 'type': 'str'}, + 'parameters': {'key': 'freshness', 'type': 'FreshnessScoringParameters'}, + } + + def __init__( + self, + *, + field_name: str, + boost: float, + parameters: "FreshnessScoringParameters", + interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None, + **kwargs + ): + super(FreshnessScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) + self.type = 'freshness' + self.parameters = parameters + + +class FreshnessScoringParameters(msrest.serialization.Model): + """Provides parameter values to a freshness scoring function. + + All required parameters must be populated in order to send to Azure. + + :param boosting_duration: Required. The expiration period after which boosting will stop for a + particular document. 
+ :type boosting_duration: ~datetime.timedelta + """ + + _validation = { + 'boosting_duration': {'required': True}, + } + + _attribute_map = { + 'boosting_duration': {'key': 'boostingDuration', 'type': 'duration'}, + } + + def __init__( + self, + *, + boosting_duration: datetime.timedelta, + **kwargs + ): + super(FreshnessScoringParameters, self).__init__(**kwargs) + self.boosting_duration = boosting_duration + + +class GetIndexStatisticsResult(msrest.serialization.Model): + """Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar document_count: Required. The number of documents in the index. + :vartype document_count: long + :ivar storage_size: Required. The amount of storage in bytes consumed by the index. + :vartype storage_size: long + """ + + _validation = { + 'document_count': {'required': True, 'readonly': True}, + 'storage_size': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'document_count': {'key': 'documentCount', 'type': 'long'}, + 'storage_size': {'key': 'storageSize', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(GetIndexStatisticsResult, self).__init__(**kwargs) + self.document_count = None + self.storage_size = None + + +class HighWaterMarkChangeDetectionPolicy(DataChangeDetectionPolicy): + """Defines a data change detection policy that captures changes based on the value of a high water mark column. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param high_water_mark_column_name: Required. The name of the high water mark column. 
+ :type high_water_mark_column_name: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'high_water_mark_column_name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'high_water_mark_column_name': {'key': 'highWaterMarkColumnName', 'type': 'str'}, + } + + def __init__( + self, + *, + high_water_mark_column_name: str, + **kwargs + ): + super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' + self.high_water_mark_column_name = high_water_mark_column_name + + +class ImageAnalysisSkill(Skill): + """A skill that analyzes image files. It extracts a rich set of visual features based on the image content. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param default_language_code: A value indicating which language code to use. Default is en. + Possible values include: 'en', 'es', 'ja', 'pt', 'zh'. + :type default_language_code: str or ~search_service_client.models.ImageAnalysisSkillLanguage + :param visual_features: A list of visual features. + :type visual_features: list[str or ~search_service_client.models.VisualFeature] + :param details: A string indicating which domain-specific details to return. + :type details: list[str or ~search_service_client.models.ImageDetail] + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + 'visual_features': {'key': 'visualFeatures', 'type': '[str]'}, + 'details': {'key': 'details', 'type': '[str]'}, + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[Union[str, "ImageAnalysisSkillLanguage"]] = None, + visual_features: Optional[List[Union[str, "VisualFeature"]]] = None, + details: Optional[List[Union[str, "ImageDetail"]]] = None, + **kwargs + ): + super(ImageAnalysisSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type = '#Microsoft.Skills.Vision.ImageAnalysisSkill' + self.default_language_code = 
default_language_code + self.visual_features = visual_features + self.details = details + + +class Index(msrest.serialization.Model): + """Represents a search index definition, which describes the fields and search behavior of an index. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the index. + :type name: str + :param fields: Required. The fields of the index. + :type fields: list[~search_service_client.models.Field] + :param scoring_profiles: The scoring profiles for the index. + :type scoring_profiles: list[~search_service_client.models.ScoringProfile] + :param default_scoring_profile: The name of the scoring profile to use if none is specified in + the query. If this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used. + :type default_scoring_profile: str + :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :type cors_options: ~search_service_client.models.CorsOptions + :param suggesters: The suggesters for the index. + :type suggesters: list[~search_service_client.models.Suggester] + :param analyzers: The analyzers for the index. + :type analyzers: list[~search_service_client.models.Analyzer] + :param tokenizers: The tokenizers for the index. + :type tokenizers: list[~search_service_client.models.Tokenizer] + :param token_filters: The token filters for the index. + :type token_filters: list[~search_service_client.models.TokenFilter] + :param char_filters: The character filters for the index. + :type char_filters: list[~search_service_client.models.CharFilter] + :param encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. 
Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :type encryption_key: ~search_service_client.models.EncryptionKey + :param e_tag: The ETag of the index. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'fields': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'fields': {'key': 'fields', 'type': '[Field]'}, + 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, + 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, + 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, + 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, + 'analyzers': {'key': 'analyzers', 'type': '[Analyzer]'}, + 'tokenizers': {'key': 'tokenizers', 'type': '[Tokenizer]'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, + 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + fields: List["Field"], + scoring_profiles: Optional[List["ScoringProfile"]] = None, + default_scoring_profile: Optional[str] = None, + cors_options: Optional["CorsOptions"] = None, + suggesters: Optional[List["Suggester"]] = None, + analyzers: Optional[List["Analyzer"]] = None, + tokenizers: Optional[List["Tokenizer"]] = None, + token_filters: Optional[List["TokenFilter"]] = None, + char_filters: Optional[List["CharFilter"]] = None, + encryption_key: Optional["EncryptionKey"] = None, + e_tag: Optional[str] = None, + **kwargs + 
): + super(Index, self).__init__(**kwargs) + self.name = name + self.fields = fields + self.scoring_profiles = scoring_profiles + self.default_scoring_profile = default_scoring_profile + self.cors_options = cors_options + self.suggesters = suggesters + self.analyzers = analyzers + self.tokenizers = tokenizers + self.token_filters = token_filters + self.char_filters = char_filters + self.encryption_key = encryption_key + self.e_tag = e_tag + + +class Indexer(msrest.serialization.Model): + """Represents an indexer. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the indexer. + :type name: str + :param description: The description of the indexer. + :type description: str + :param data_source_name: Required. The name of the datasource from which this indexer reads + data. + :type data_source_name: str + :param skillset_name: The name of the skillset executing with this indexer. + :type skillset_name: str + :param target_index_name: Required. The name of the index to which this indexer writes data. + :type target_index_name: str + :param schedule: The schedule for this indexer. + :type schedule: ~search_service_client.models.IndexingSchedule + :param parameters: Parameters for indexer execution. + :type parameters: ~search_service_client.models.IndexingParameters + :param field_mappings: Defines mappings between fields in the data source and corresponding + target fields in the index. + :type field_mappings: list[~search_service_client.models.FieldMapping] + :param output_field_mappings: Output field mappings are applied after enrichment and + immediately before indexing. + :type output_field_mappings: list[~search_service_client.models.FieldMapping] + :param is_disabled: A value indicating whether the indexer is disabled. Default is false. + :type is_disabled: bool + :param e_tag: The ETag of the Indexer. 
+ :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'data_source_name': {'required': True}, + 'target_index_name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'data_source_name': {'key': 'dataSourceName', 'type': 'str'}, + 'skillset_name': {'key': 'skillsetName', 'type': 'str'}, + 'target_index_name': {'key': 'targetIndexName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'}, + 'parameters': {'key': 'parameters', 'type': 'IndexingParameters'}, + 'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'}, + 'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'}, + 'is_disabled': {'key': 'disabled', 'type': 'bool'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + data_source_name: str, + target_index_name: str, + description: Optional[str] = None, + skillset_name: Optional[str] = None, + schedule: Optional["IndexingSchedule"] = None, + parameters: Optional["IndexingParameters"] = None, + field_mappings: Optional[List["FieldMapping"]] = None, + output_field_mappings: Optional[List["FieldMapping"]] = None, + is_disabled: Optional[bool] = False, + e_tag: Optional[str] = None, + **kwargs + ): + super(Indexer, self).__init__(**kwargs) + self.name = name + self.description = description + self.data_source_name = data_source_name + self.skillset_name = skillset_name + self.target_index_name = target_index_name + self.schedule = schedule + self.parameters = parameters + self.field_mappings = field_mappings + self.output_field_mappings = output_field_mappings + self.is_disabled = is_disabled + self.e_tag = e_tag + + +class IndexerExecutionInfo(msrest.serialization.Model): + """Represents the current status and execution history of an indexer. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar status: Required. Overall indexer status. Possible values include: 'unknown', 'error', + 'running'. + :vartype status: str or ~search_service_client.models.IndexerStatus + :ivar last_result: The result of the most recent or an in-progress indexer execution. + :vartype last_result: ~search_service_client.models.IndexerExecutionResult + :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse + chronological order. + :vartype execution_history: list[~search_service_client.models.IndexerExecutionResult] + :ivar limits: Required. The execution limits for the indexer. + :vartype limits: ~search_service_client.models.IndexerLimits + """ + + _validation = { + 'status': {'required': True, 'readonly': True}, + 'last_result': {'readonly': True}, + 'execution_history': {'required': True, 'readonly': True}, + 'limits': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'}, + 'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'}, + 'limits': {'key': 'limits', 'type': 'IndexerLimits'}, + } + + def __init__( + self, + **kwargs + ): + super(IndexerExecutionInfo, self).__init__(**kwargs) + self.status = None + self.last_result = None + self.execution_history = None + self.limits = None + + +class IndexerExecutionResult(msrest.serialization.Model): + """Represents the result of an individual indexer execution. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar status: Required. The outcome of this indexer execution. 
Possible values include: + 'transientFailure', 'success', 'inProgress', 'reset'. + :vartype status: str or ~search_service_client.models.IndexerExecutionStatus + :ivar error_message: The error message indicating the top-level error, if any. + :vartype error_message: str + :ivar start_time: The start time of this indexer execution. + :vartype start_time: ~datetime.datetime + :ivar end_time: The end time of this indexer execution, if the execution has already completed. + :vartype end_time: ~datetime.datetime + :ivar errors: Required. The item-level indexing errors. + :vartype errors: list[~search_service_client.models.ItemError] + :ivar warnings: Required. The item-level indexing warnings. + :vartype warnings: list[~search_service_client.models.ItemWarning] + :ivar item_count: Required. The number of items that were processed during this indexer + execution. This includes both successfully processed items and items where indexing was + attempted but failed. + :vartype item_count: int + :ivar failed_item_count: Required. The number of items that failed to be indexed during this + indexer execution. + :vartype failed_item_count: int + :ivar initial_tracking_state: Change tracking state with which an indexer execution started. + :vartype initial_tracking_state: str + :ivar final_tracking_state: Change tracking state with which an indexer execution finished. 
+ :vartype final_tracking_state: str + """ + + _validation = { + 'status': {'required': True, 'readonly': True}, + 'error_message': {'readonly': True}, + 'start_time': {'readonly': True}, + 'end_time': {'readonly': True}, + 'errors': {'required': True, 'readonly': True}, + 'warnings': {'required': True, 'readonly': True}, + 'item_count': {'required': True, 'readonly': True}, + 'failed_item_count': {'required': True, 'readonly': True}, + 'initial_tracking_state': {'readonly': True}, + 'final_tracking_state': {'readonly': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'errors': {'key': 'errors', 'type': '[ItemError]'}, + 'warnings': {'key': 'warnings', 'type': '[ItemWarning]'}, + 'item_count': {'key': 'itemsProcessed', 'type': 'int'}, + 'failed_item_count': {'key': 'itemsFailed', 'type': 'int'}, + 'initial_tracking_state': {'key': 'initialTrackingState', 'type': 'str'}, + 'final_tracking_state': {'key': 'finalTrackingState', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(IndexerExecutionResult, self).__init__(**kwargs) + self.status = None + self.error_message = None + self.start_time = None + self.end_time = None + self.errors = None + self.warnings = None + self.item_count = None + self.failed_item_count = None + self.initial_tracking_state = None + self.final_tracking_state = None + + +class IndexerLimits(msrest.serialization.Model): + """IndexerLimits. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar max_run_time: The maximum duration that the indexer is permitted to run for one + execution. + :vartype max_run_time: ~datetime.timedelta + :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be + considered valid for indexing. 
+ :vartype max_document_extraction_size: long + :ivar max_document_content_characters_to_extract: The maximum number of characters that will be + extracted from a document picked up for indexing. + :vartype max_document_content_characters_to_extract: long + """ + + _validation = { + 'max_run_time': {'readonly': True}, + 'max_document_extraction_size': {'readonly': True}, + 'max_document_content_characters_to_extract': {'readonly': True}, + } + + _attribute_map = { + 'max_run_time': {'key': 'maxRunTime', 'type': 'duration'}, + 'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'}, + 'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(IndexerLimits, self).__init__(**kwargs) + self.max_run_time = None + self.max_document_extraction_size = None + self.max_document_content_characters_to_extract = None + + +class IndexingParameters(msrest.serialization.Model): + """Represents parameters for indexer execution. + + :param batch_size: The number of items that are read from the data source and indexed as a + single batch in order to improve performance. The default depends on the data source type. + :type batch_size: int + :param max_failed_items: The maximum number of items that can fail indexing for indexer + execution to still be considered successful. -1 means no limit. Default is 0. + :type max_failed_items: int + :param max_failed_items_per_batch: The maximum number of items in a single batch that can fail + indexing for the batch to still be considered successful. -1 means no limit. Default is 0. + :type max_failed_items_per_batch: int + :param configuration: A dictionary of indexer-specific configuration properties. Each name is + the name of a specific property. Each value must be of a primitive type. 
+ :type configuration: dict[str, object] + """ + + _attribute_map = { + 'batch_size': {'key': 'batchSize', 'type': 'int'}, + 'max_failed_items': {'key': 'maxFailedItems', 'type': 'int'}, + 'max_failed_items_per_batch': {'key': 'maxFailedItemsPerBatch', 'type': 'int'}, + 'configuration': {'key': 'configuration', 'type': '{object}'}, + } + + def __init__( + self, + *, + batch_size: Optional[int] = None, + max_failed_items: Optional[int] = 0, + max_failed_items_per_batch: Optional[int] = 0, + configuration: Optional[Dict[str, object]] = None, + **kwargs + ): + super(IndexingParameters, self).__init__(**kwargs) + self.batch_size = batch_size + self.max_failed_items = max_failed_items + self.max_failed_items_per_batch = max_failed_items_per_batch + self.configuration = configuration + + +class IndexingSchedule(msrest.serialization.Model): + """Represents a schedule for indexer execution. + + All required parameters must be populated in order to send to Azure. + + :param interval: Required. The interval of time between indexer executions. + :type interval: ~datetime.timedelta + :param start_time: The time when an indexer should start running. + :type start_time: ~datetime.datetime + """ + + _validation = { + 'interval': {'required': True}, + } + + _attribute_map = { + 'interval': {'key': 'interval', 'type': 'duration'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + } + + def __init__( + self, + *, + interval: datetime.timedelta, + start_time: Optional[datetime.datetime] = None, + **kwargs + ): + super(IndexingSchedule, self).__init__(**kwargs) + self.interval = interval + self.start_time = start_time + + +class InputFieldMappingEntry(msrest.serialization.Model): + """Input field mapping for a skill. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the input. + :type name: str + :param source: The source of the input. 
+ :type source: str + :param source_context: The source context used for selecting recursive inputs. + :type source_context: str + :param inputs: The recursive inputs used when creating a complex type. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'source': {'key': 'source', 'type': 'str'}, + 'source_context': {'key': 'sourceContext', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + } + + def __init__( + self, + *, + name: str, + source: Optional[str] = None, + source_context: Optional[str] = None, + inputs: Optional[List["InputFieldMappingEntry"]] = None, + **kwargs + ): + super(InputFieldMappingEntry, self).__init__(**kwargs) + self.name = name + self.source = source + self.source_context = source_context + self.inputs = inputs + + +class ItemError(msrest.serialization.Model): + """Represents an item- or document-level indexing error. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar key: The key of the item for which indexing failed. + :vartype key: str + :ivar error_message: Required. The message describing the error that occurred while processing + the item. + :vartype error_message: str + :ivar status_code: Required. The status code indicating why the indexing operation failed. + Possible values include: 400 for a malformed input document, 404 for document not found, 409 + for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the + service is too busy. + :vartype status_code: int + :ivar name: The name of the source at which the error originated. For example, this could refer + to a particular skill in the attached skillset. This may not be always available. 
+ :vartype name: str + :ivar details: Additional, verbose details about the error to assist in debugging the indexer. + This may not be always available. + :vartype details: str + :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This + may not be always available. + :vartype documentation_link: str + """ + + _validation = { + 'key': {'readonly': True}, + 'error_message': {'required': True, 'readonly': True}, + 'status_code': {'required': True, 'readonly': True}, + 'name': {'readonly': True}, + 'details': {'readonly': True}, + 'documentation_link': {'readonly': True}, + } + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + 'status_code': {'key': 'statusCode', 'type': 'int'}, + 'name': {'key': 'name', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ItemError, self).__init__(**kwargs) + self.key = None + self.error_message = None + self.status_code = None + self.name = None + self.details = None + self.documentation_link = None + + +class ItemWarning(msrest.serialization.Model): + """Represents an item-level warning. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar key: The key of the item which generated a warning. + :vartype key: str + :ivar message: Required. The message describing the warning that occurred while processing the + item. + :vartype message: str + :ivar name: The name of the source at which the warning originated. For example, this could + refer to a particular skill in the attached skillset. This may not be always available. + :vartype name: str + :ivar details: Additional, verbose details about the warning to assist in debugging the + indexer. This may not be always available. 
+ :vartype details: str + :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This + may not be always available. + :vartype documentation_link: str + """ + + _validation = { + 'key': {'readonly': True}, + 'message': {'required': True, 'readonly': True}, + 'name': {'readonly': True}, + 'details': {'readonly': True}, + 'documentation_link': {'readonly': True}, + } + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ItemWarning, self).__init__(**kwargs) + self.key = None + self.message = None + self.name = None + self.details = None + self.documentation_link = None + + +class KeepTokenFilter(TokenFilter): + """A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param keep_words: Required. The list of words to keep. + :type keep_words: list[str] + :param lower_case_keep_words: A value indicating whether to lower case all words first. Default + is false. 
+ :type lower_case_keep_words: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'keep_words': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'keep_words': {'key': 'keepWords', 'type': '[str]'}, + 'lower_case_keep_words': {'key': 'keepWordsCase', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + keep_words: List[str], + lower_case_keep_words: Optional[bool] = False, + **kwargs + ): + super(KeepTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter' + self.keep_words = keep_words + self.lower_case_keep_words = lower_case_keep_words + + +class KeyPhraseExtractionSkill(Skill): + """A skill that uses text analytics for key phrase extraction. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param default_language_code: A value indicating which language code to use. Default is en. + Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'it', 'ja', 'ko', 'no', 'pl', 'pt- + PT', 'pt-BR', 'ru', 'es', 'sv'. + :type default_language_code: str or + ~search_service_client.models.KeyPhraseExtractionSkillLanguage + :param max_key_phrase_count: A number indicating how many key phrases to return. If absent, all + identified key phrases will be returned. + :type max_key_phrase_count: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + 'max_key_phrase_count': {'key': 'maxKeyPhraseCount', 'type': 'int'}, + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[Union[str, "KeyPhraseExtractionSkillLanguage"]] = None, + max_key_phrase_count: Optional[int] = None, + **kwargs + ): + super(KeyPhraseExtractionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' + self.default_language_code = default_language_code + self.max_key_phrase_count = max_key_phrase_count + + +class KeywordMarkerTokenFilter(TokenFilter): + """Marks terms as keywords. 
This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param keywords: Required. A list of words to mark as keywords. + :type keywords: list[str] + :param ignore_case: A value indicating whether to ignore case. If true, all words are converted + to lower case first. Default is false. + :type ignore_case: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'keywords': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'keywords': {'key': 'keywords', 'type': '[str]'}, + 'ignore_case': {'key': 'ignoreCase', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + keywords: List[str], + ignore_case: Optional[bool] = False, + **kwargs + ): + super(KeywordMarkerTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' + self.keywords = keywords + self.ignore_case = ignore_case + + +class KeywordTokenizer(Tokenizer): + """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param buffer_size: The read buffer size in bytes. Default is 256. 
+ :type buffer_size: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'buffer_size': {'key': 'bufferSize', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + buffer_size: Optional[int] = 256, + **kwargs + ): + super(KeywordTokenizer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizer' + self.buffer_size = buffer_size + + +class KeywordTokenizerV2(Tokenizer): + """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 256. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. 
+ :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 256, + **kwargs + ): + super(KeywordTokenizerV2, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizerV2' + self.max_token_length = max_token_length + + +class LanguageDetectionSkill(Skill): + """A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + **kwargs + ): + super(LanguageDetectionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type = '#Microsoft.Skills.Text.LanguageDetectionSkill' + + +class LengthTokenFilter(TokenFilter): + """Removes words that are too long or too short. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param min: The minimum length in characters. Default is 0. Maximum is 300. Must be less than + the value of max. + :type min: int + :param max: The maximum length in characters. Default and maximum is 300. 
+ :type max: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'min': {'maximum': 300}, + 'max': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min': {'key': 'min', 'type': 'int'}, + 'max': {'key': 'max', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + min: Optional[int] = 0, + max: Optional[int] = 300, + **kwargs + ): + super(LengthTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' + self.min = min + self.max = max + + +class LimitTokenFilter(TokenFilter): + """Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param max_token_count: The maximum number of tokens to produce. Default is 1. + :type max_token_count: int + :param consume_all_tokens: A value indicating whether all tokens from the input must be + consumed even if maxTokenCount is reached. Default is false. 
+ :type consume_all_tokens: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_count': {'key': 'maxTokenCount', 'type': 'int'}, + 'consume_all_tokens': {'key': 'consumeAllTokens', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + max_token_count: Optional[int] = 1, + consume_all_tokens: Optional[bool] = False, + **kwargs + ): + super(LimitTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.LimitTokenFilter' + self.max_token_count = max_token_count + self.consume_all_tokens = consume_all_tokens + + +class ListDataSourcesResult(msrest.serialization.Model): + """Response from a List Datasources request. If successful, it includes the full definitions of all datasources. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar data_sources: Required. The datasources in the Search service. + :vartype data_sources: list[~search_service_client.models.DataSource] + """ + + _validation = { + 'data_sources': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'data_sources': {'key': 'value', 'type': '[DataSource]'}, + } + + def __init__( + self, + **kwargs + ): + super(ListDataSourcesResult, self).__init__(**kwargs) + self.data_sources = None + + +class ListIndexersResult(msrest.serialization.Model): + """Response from a List Indexers request. If successful, it includes the full definitions of all indexers. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar indexers: Required. The indexers in the Search service. 
+ :vartype indexers: list[~search_service_client.models.Indexer] + """ + + _validation = { + 'indexers': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'indexers': {'key': 'value', 'type': '[Indexer]'}, + } + + def __init__( + self, + **kwargs + ): + super(ListIndexersResult, self).__init__(**kwargs) + self.indexers = None + + +class ListIndexesResult(msrest.serialization.Model): + """Response from a List Indexes request. If successful, it includes the full definitions of all indexes. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar indexes: Required. The indexes in the Search service. + :vartype indexes: list[~search_service_client.models.Index] + """ + + _validation = { + 'indexes': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'indexes': {'key': 'value', 'type': '[Index]'}, + } + + def __init__( + self, + **kwargs + ): + super(ListIndexesResult, self).__init__(**kwargs) + self.indexes = None + + +class ListSkillsetsResult(msrest.serialization.Model): + """Response from a list Skillset request. If successful, it includes the full definitions of all skillsets. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar skillsets: Required. The skillsets defined in the Search service. + :vartype skillsets: list[~search_service_client.models.Skillset] + """ + + _validation = { + 'skillsets': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'skillsets': {'key': 'value', 'type': '[Skillset]'}, + } + + def __init__( + self, + **kwargs + ): + super(ListSkillsetsResult, self).__init__(**kwargs) + self.skillsets = None + + +class ListSynonymMapsResult(msrest.serialization.Model): + """Response from a List SynonymMaps request. 
If successful, it includes the full definitions of all synonym maps. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar synonym_maps: Required. The synonym maps in the Search service. + :vartype synonym_maps: list[~search_service_client.models.SynonymMap] + """ + + _validation = { + 'synonym_maps': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'synonym_maps': {'key': 'value', 'type': '[SynonymMap]'}, + } + + def __init__( + self, + **kwargs + ): + super(ListSynonymMapsResult, self).__init__(**kwargs) + self.synonym_maps = None + + +class MagnitudeScoringFunction(ScoringFunction): + """Defines a function that boosts scores based on the magnitude of a numeric field. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. + :type type: str + :param field_name: Required. The name of the field used as input to the scoring function. + :type field_name: str + :param boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. + :type boost: float + :param interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', + 'logarithmic'. + :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + :param parameters: Required. Parameter values for the magnitude scoring function. 
+ :type parameters: ~search_service_client.models.MagnitudeScoringParameters + """ + + _validation = { + 'type': {'required': True}, + 'field_name': {'required': True}, + 'boost': {'required': True}, + 'parameters': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'field_name': {'key': 'fieldName', 'type': 'str'}, + 'boost': {'key': 'boost', 'type': 'float'}, + 'interpolation': {'key': 'interpolation', 'type': 'str'}, + 'parameters': {'key': 'magnitude', 'type': 'MagnitudeScoringParameters'}, + } + + def __init__( + self, + *, + field_name: str, + boost: float, + parameters: "MagnitudeScoringParameters", + interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None, + **kwargs + ): + super(MagnitudeScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) + self.type = 'magnitude' + self.parameters = parameters + + +class MagnitudeScoringParameters(msrest.serialization.Model): + """Provides parameter values to a magnitude scoring function. + + All required parameters must be populated in order to send to Azure. + + :param boosting_range_start: Required. The field value at which boosting starts. + :type boosting_range_start: float + :param boosting_range_end: Required. The field value at which boosting ends. + :type boosting_range_end: float + :param should_boost_beyond_range_by_constant: A value indicating whether to apply a constant + boost for field values beyond the range end value; default is false. 
+ :type should_boost_beyond_range_by_constant: bool + """ + + _validation = { + 'boosting_range_start': {'required': True}, + 'boosting_range_end': {'required': True}, + } + + _attribute_map = { + 'boosting_range_start': {'key': 'boostingRangeStart', 'type': 'float'}, + 'boosting_range_end': {'key': 'boostingRangeEnd', 'type': 'float'}, + 'should_boost_beyond_range_by_constant': {'key': 'constantBoostBeyondRange', 'type': 'bool'}, + } + + def __init__( + self, + *, + boosting_range_start: float, + boosting_range_end: float, + should_boost_beyond_range_by_constant: Optional[bool] = None, + **kwargs + ): + super(MagnitudeScoringParameters, self).__init__(**kwargs) + self.boosting_range_start = boosting_range_start + self.boosting_range_end = boosting_range_end + self.should_boost_beyond_range_by_constant = should_boost_beyond_range_by_constant + + +class MappingCharFilter(CharFilter): + """A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the char filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param mappings: Required. A list of mappings of the following format: "a=>b" (all occurrences + of the character "a" will be replaced with character "b"). 
+ :type mappings: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'mappings': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'mappings': {'key': 'mappings', 'type': '[str]'}, + } + + def __init__( + self, + *, + name: str, + mappings: List[str], + **kwargs + ): + super(MappingCharFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter' + self.mappings = mappings + + +class MergeSkill(Skill): + """A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is + an empty space. + :type insert_pre_tag: str + :param insert_post_tag: The tag indicates the end of the merged text. By default, the tag is an + empty space. + :type insert_post_tag: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'insert_pre_tag': {'key': 'insertPreTag', 'type': 'str'}, + 'insert_post_tag': {'key': 'insertPostTag', 'type': 'str'}, + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + insert_pre_tag: Optional[str] = " ", + insert_post_tag: Optional[str] = " ", + **kwargs + ): + super(MergeSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type = '#Microsoft.Skills.Text.MergeSkill' + self.insert_pre_tag = insert_pre_tag + self.insert_post_tag = insert_post_tag + + +class MicrosoftLanguageStemmingTokenizer(Tokenizer): + """Divides text using language-specific rules and reduces words to their base forms. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Tokens longer than the maximum length are + split. Maximum token length that can be used is 300 characters. Tokens longer than 300 + characters are first split into tokens of length 300 and then each of those tokens is split + based on the max token length set. Default is 255. + :type max_token_length: int + :param is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used + as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. + :type is_search_tokenizer: bool + :param language: The language to use. The default is English. Possible values include: + 'arabic', 'bangla', 'bulgarian', 'catalan', 'croatian', 'czech', 'danish', 'dutch', 'english', + 'estonian', 'finnish', 'french', 'german', 'greek', 'gujarati', 'hebrew', 'hindi', 'hungarian', + 'icelandic', 'indonesian', 'italian', 'kannada', 'latvian', 'lithuanian', 'malay', 'malayalam', + 'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi', + 'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovak', 'slovenian', 'spanish', + 'swedish', 'tamil', 'telugu', 'turkish', 'ukrainian', 'urdu'. 
+ :type language: str or ~search_service_client.models.MicrosoftStemmingTokenizerLanguage + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + 'is_search_tokenizer': {'key': 'isSearchTokenizer', 'type': 'bool'}, + 'language': {'key': 'language', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 255, + is_search_tokenizer: Optional[bool] = False, + language: Optional[Union[str, "MicrosoftStemmingTokenizerLanguage"]] = None, + **kwargs + ): + super(MicrosoftLanguageStemmingTokenizer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' + self.max_token_length = max_token_length + self.is_search_tokenizer = is_search_tokenizer + self.language = language + + +class MicrosoftLanguageTokenizer(Tokenizer): + """Divides text using language-specific rules. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Tokens longer than the maximum length are + split. Maximum token length that can be used is 300 characters. Tokens longer than 300 + characters are first split into tokens of length 300 and then each of those tokens is split + based on the max token length set. Default is 255. + :type max_token_length: int + :param is_search_tokenizer: A value indicating how the tokenizer is used. 
Set to true if used + as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. + :type is_search_tokenizer: bool + :param language: The language to use. The default is English. Possible values include: + 'bangla', 'bulgarian', 'catalan', 'chineseSimplified', 'chineseTraditional', 'croatian', + 'czech', 'danish', 'dutch', 'english', 'french', 'german', 'greek', 'gujarati', 'hindi', + 'icelandic', 'indonesian', 'italian', 'japanese', 'kannada', 'korean', 'malay', 'malayalam', + 'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi', + 'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovenian', 'spanish', 'swedish', + 'tamil', 'telugu', 'thai', 'ukrainian', 'urdu', 'vietnamese'. + :type language: str or ~search_service_client.models.MicrosoftTokenizerLanguage + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + 'is_search_tokenizer': {'key': 'isSearchTokenizer', 'type': 'bool'}, + 'language': {'key': 'language', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 255, + is_search_tokenizer: Optional[bool] = False, + language: Optional[Union[str, "MicrosoftTokenizerLanguage"]] = None, + **kwargs + ): + super(MicrosoftLanguageTokenizer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' + self.max_token_length = max_token_length + self.is_search_tokenizer = is_search_tokenizer + self.language = language + + +class NGramTokenFilter(TokenFilter): + """Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. 
+ + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. + :type max_gram: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = 1, + max_gram: Optional[int] = 2, + **kwargs + ): + super(NGramTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilter' + self.min_gram = min_gram + self.max_gram = max_gram + + +class NGramTokenFilterV2(TokenFilter): + """Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. 
+ :type max_gram: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'min_gram': {'maximum': 300}, + 'max_gram': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = 1, + max_gram: Optional[int] = 2, + **kwargs + ): + super(NGramTokenFilterV2, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilterV2' + self.min_gram = min_gram + self.max_gram = max_gram + + +class NGramTokenizer(Tokenizer): + """Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :type min_gram: int + :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :type max_gram: int + :param token_chars: Character classes to keep in the tokens. 
+ :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'min_gram': {'maximum': 300}, + 'max_gram': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'min_gram': {'key': 'minGram', 'type': 'int'}, + 'max_gram': {'key': 'maxGram', 'type': 'int'}, + 'token_chars': {'key': 'tokenChars', 'type': '[str]'}, + } + + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = 1, + max_gram: Optional[int] = 2, + token_chars: Optional[List[Union[str, "TokenCharacterKind"]]] = None, + **kwargs + ): + super(NGramTokenizer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.NGramTokenizer' + self.min_gram = min_gram + self.max_gram = max_gram + self.token_chars = token_chars + + +class OcrSkill(Skill): + """A skill that extracts text from image files. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. 
The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param text_extraction_algorithm: A value indicating which algorithm to use for extracting + text. Default is printed. Possible values include: 'printed', 'handwritten'. + :type text_extraction_algorithm: str or ~search_service_client.models.TextExtractionAlgorithm + :param default_language_code: A value indicating which language code to use. Default is en. + Possible values include: 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', + 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr- + Latn', 'sk'. + :type default_language_code: str or ~search_service_client.models.OcrSkillLanguage + :param should_detect_orientation: A value indicating to turn orientation detection on or not. + Default is false. + :type should_detect_orientation: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'text_extraction_algorithm': {'key': 'textExtractionAlgorithm', 'type': 'str'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + 'should_detect_orientation': {'key': 'detectOrientation', 'type': 'bool'}, + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + text_extraction_algorithm: 
Optional[Union[str, "TextExtractionAlgorithm"]] = None, + default_language_code: Optional[Union[str, "OcrSkillLanguage"]] = None, + should_detect_orientation: Optional[bool] = False, + **kwargs + ): + super(OcrSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type = '#Microsoft.Skills.Vision.OcrSkill' + self.text_extraction_algorithm = text_extraction_algorithm + self.default_language_code = default_language_code + self.should_detect_orientation = should_detect_orientation + + +class OutputFieldMappingEntry(msrest.serialization.Model): + """Output field mapping for a skill. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the output defined by the skill. + :type name: str + :param target_name: The target name of the output. It is optional and default to name. + :type target_name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'target_name': {'key': 'targetName', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + target_name: Optional[str] = None, + **kwargs + ): + super(OutputFieldMappingEntry, self).__init__(**kwargs) + self.name = name + self.target_name = target_name + + +class PathHierarchyTokenizerV2(Tokenizer): + """Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param delimiter: The delimiter character to use. Default is "/". 
+ :type delimiter: str + :param replacement: A value that, if set, replaces the delimiter character. Default is "/". + :type replacement: str + :param max_token_length: The maximum token length. Default and maximum is 300. + :type max_token_length: int + :param reverse_token_order: A value indicating whether to generate tokens in reverse order. + Default is false. + :type reverse_token_order: bool + :param number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. + :type number_of_tokens_to_skip: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'delimiter': {'key': 'delimiter', 'type': 'str'}, + 'replacement': {'key': 'replacement', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + 'reverse_token_order': {'key': 'reverse', 'type': 'bool'}, + 'number_of_tokens_to_skip': {'key': 'skip', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + delimiter: Optional[str] = "/", + replacement: Optional[str] = "/", + max_token_length: Optional[int] = 300, + reverse_token_order: Optional[bool] = False, + number_of_tokens_to_skip: Optional[int] = 0, + **kwargs + ): + super(PathHierarchyTokenizerV2, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' + self.delimiter = delimiter + self.replacement = replacement + self.max_token_length = max_token_length + self.reverse_token_order = reverse_token_order + self.number_of_tokens_to_skip = number_of_tokens_to_skip + + +class PatternAnalyzer(Analyzer): + """Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. 
+    :type odata_type: str
+    :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+     dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+     128 characters.
+    :type name: str
+    :param lower_case_terms: A value indicating whether terms should be lower-cased. Default is
+     true.
+    :type lower_case_terms: bool
+    :param pattern: A regular expression pattern to match token separators. Default is an
+     expression that matches one or more whitespace characters.
+    :type pattern: str
+    :param flags: Regular expression flags. Possible values include: 'CANON_EQ',
+     'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'.
+    :type flags: str or ~search_service_client.models.RegexFlags
+    :param stopwords: A list of stopwords.
+    :type stopwords: list[str]
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'lower_case_terms': {'key': 'lowercase', 'type': 'bool'},
+        'pattern': {'key': 'pattern', 'type': 'str'},
+        'flags': {'key': 'flags', 'type': 'str'},
+        'stopwords': {'key': 'stopwords', 'type': '[str]'},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        lower_case_terms: Optional[bool] = True,
+        pattern: Optional[str] = r"\W+",  # raw string: \W is an invalid escape in a plain literal
+        flags: Optional[Union[str, "RegexFlags"]] = None,
+        stopwords: Optional[List[str]] = None,
+        **kwargs
+    ):
+        super(PatternAnalyzer, self).__init__(name=name, **kwargs)
+        self.odata_type = '#Microsoft.Azure.Search.PatternAnalyzer'
+        self.lower_case_terms = lower_case_terms
+        self.pattern = pattern
+        self.flags = flags
+        self.stopwords = stopwords
+
+
+class PatternCaptureTokenFilter(TokenFilter):
+    """Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param odata_type: Required. Constant filled by server.
+    :type odata_type: str
+    :param name: Required. The name of the token filter. It must only contain letters, digits,
+     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+     limited to 128 characters.
+    :type name: str
+    :param patterns: Required. A list of patterns to match against each token.
+    :type patterns: list[str]
+    :param preserve_original: A value indicating whether to return the original token even if one
+     of the patterns matches. Default is true.
+    :type preserve_original: bool
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+        'name': {'required': True},
+        'patterns': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'patterns': {'key': 'patterns', 'type': '[str]'},
+        'preserve_original': {'key': 'preserveOriginal', 'type': 'bool'},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        patterns: List[str],
+        preserve_original: Optional[bool] = True,
+        **kwargs
+    ):
+        super(PatternCaptureTokenFilter, self).__init__(name=name, **kwargs)
+        self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter'
+        self.patterns = patterns
+        self.preserve_original = preserve_original
+
+
+class PatternReplaceCharFilter(CharFilter):
+    r"""A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param odata_type: Required. Constant filled by server.
+    :type odata_type: str
+    :param name: Required. The name of the char filter. It must only contain letters, digits,
+     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+     limited to 128 characters.
+    :type name: str
+    :param pattern: Required. A regular expression pattern.
+    :type pattern: str
+    :param replacement: Required. The replacement text.
+    :type replacement: str
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+        'name': {'required': True},
+        'pattern': {'required': True},
+        'replacement': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'pattern': {'key': 'pattern', 'type': 'str'},
+        'replacement': {'key': 'replacement', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        pattern: str,
+        replacement: str,
+        **kwargs
+    ):
+        super(PatternReplaceCharFilter, self).__init__(name=name, **kwargs)
+        self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter'
+        self.pattern = pattern
+        self.replacement = replacement
+
+
+class PatternReplaceTokenFilter(TokenFilter):
+    r"""A token filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param odata_type: Required. Constant filled by server.
+    :type odata_type: str
+    :param name: Required. The name of the token filter. It must only contain letters, digits,
+     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+     limited to 128 characters.
+    :type name: str
+    :param pattern: Required. A regular expression pattern.
+ :type pattern: str + :param replacement: Required. The replacement text. + :type replacement: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'pattern': {'required': True}, + 'replacement': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'pattern': {'key': 'pattern', 'type': 'str'}, + 'replacement': {'key': 'replacement', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + pattern: str, + replacement: str, + **kwargs + ): + super(PatternReplaceTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' + self.pattern = pattern + self.replacement = replacement + + +class PatternTokenizer(Tokenizer): + """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param pattern: A regular expression pattern to match token separators. Default is an + expression that matches one or more whitespace characters. + :type pattern: str + :param flags: Regular expression flags. Possible values include: 'CANON_EQ', + 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. + :type flags: str or ~search_service_client.models.RegexFlags + :param group: The zero-based ordinal of the matching group in the regular expression pattern to + extract into tokens. Use -1 if you want to use the entire pattern to split the input into + tokens, irrespective of matching groups. 
Default is -1.
+    :type group: int
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'pattern': {'key': 'pattern', 'type': 'str'},
+        'flags': {'key': 'flags', 'type': 'str'},
+        'group': {'key': 'group', 'type': 'int'},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        pattern: Optional[str] = r"\W+",  # raw string: \W is an invalid escape in a plain literal
+        flags: Optional[Union[str, "RegexFlags"]] = None,
+        group: Optional[int] = -1,
+        **kwargs
+    ):
+        super(PatternTokenizer, self).__init__(name=name, **kwargs)
+        self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer'
+        self.pattern = pattern
+        self.flags = flags
+        self.group = group
+
+
+class PhoneticTokenFilter(TokenFilter):
+    """Create tokens for phonetic matches. This token filter is implemented using Apache Lucene.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param odata_type: Required. Constant filled by server.
+    :type odata_type: str
+    :param name: Required. The name of the token filter. It must only contain letters, digits,
+     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+     limited to 128 characters.
+    :type name: str
+    :param encoder: The phonetic encoder to use. Default is "metaphone". Possible values include:
+     'metaphone', 'doubleMetaphone', 'soundex', 'refinedSoundex', 'caverphone1', 'caverphone2',
+     'cologne', 'nysiis', 'koelnerPhonetik', 'haasePhonetik', 'beiderMorse'.
+    :type encoder: str or ~search_service_client.models.PhoneticEncoder
+    :param replace_original_tokens: A value indicating whether encoded tokens should replace
+     original tokens. If false, encoded tokens are added as synonyms. Default is true.
+ :type replace_original_tokens: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'encoder': {'key': 'encoder', 'type': 'str'}, + 'replace_original_tokens': {'key': 'replace', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + encoder: Optional[Union[str, "PhoneticEncoder"]] = None, + replace_original_tokens: Optional[bool] = True, + **kwargs + ): + super(PhoneticTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' + self.encoder = encoder + self.replace_original_tokens = replace_original_tokens + + +class RequestOptions(msrest.serialization.Model): + """Parameter group. + + :param x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :type x_ms_client_request_id: str + """ + + _attribute_map = { + 'x_ms_client_request_id': {'key': 'x-ms-client-request-id', 'type': 'str'}, + } + + def __init__( + self, + *, + x_ms_client_request_id: Optional[str] = None, + **kwargs + ): + super(RequestOptions, self).__init__(**kwargs) + self.x_ms_client_request_id = x_ms_client_request_id + + +class ResourceCounter(msrest.serialization.Model): + """Represents a resource's usage and quota. + + All required parameters must be populated in order to send to Azure. + + :param usage: Required. The resource usage amount. + :type usage: long + :param quota: The resource amount quota. 
+ :type quota: long + """ + + _validation = { + 'usage': {'required': True}, + } + + _attribute_map = { + 'usage': {'key': 'usage', 'type': 'long'}, + 'quota': {'key': 'quota', 'type': 'long'}, + } + + def __init__( + self, + *, + usage: int, + quota: Optional[int] = None, + **kwargs + ): + super(ResourceCounter, self).__init__(**kwargs) + self.usage = usage + self.quota = quota + + +class ScoringProfile(msrest.serialization.Model): + """Defines parameters for a search index that influence scoring in search queries. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the scoring profile. + :type name: str + :param text_weights: Parameters that boost scoring based on text matches in certain index + fields. + :type text_weights: ~search_service_client.models.TextWeights + :param functions: The collection of functions that influence the scoring of documents. + :type functions: list[~search_service_client.models.ScoringFunction] + :param function_aggregation: A value indicating how the results of individual scoring functions + should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible + values include: 'sum', 'average', 'minimum', 'maximum', 'firstMatching'. 
+ :type function_aggregation: str or ~search_service_client.models.ScoringFunctionAggregation + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'text_weights': {'key': 'text', 'type': 'TextWeights'}, + 'functions': {'key': 'functions', 'type': '[ScoringFunction]'}, + 'function_aggregation': {'key': 'functionAggregation', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + text_weights: Optional["TextWeights"] = None, + functions: Optional[List["ScoringFunction"]] = None, + function_aggregation: Optional[Union[str, "ScoringFunctionAggregation"]] = None, + **kwargs + ): + super(ScoringProfile, self).__init__(**kwargs) + self.name = name + self.text_weights = text_weights + self.functions = functions + self.function_aggregation = function_aggregation + + +class SearchError(msrest.serialization.Model): + """Describes an error condition for the Azure Cognitive Search API. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar code: One of a server-defined set of error codes. + :vartype code: str + :ivar message: Required. A human-readable representation of the error. + :vartype message: str + :ivar details: An array of details about specific errors that led to this reported error. 
+ :vartype details: list[~search_service_client.models.SearchError] + """ + + _validation = { + 'code': {'readonly': True}, + 'message': {'required': True, 'readonly': True}, + 'details': {'readonly': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[SearchError]'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchError, self).__init__(**kwargs) + self.code = None + self.message = None + self.details = None + + +class SentimentSkill(Skill): + """Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param default_language_code: A value indicating which language code to use. Default is en. 
+ Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'it', 'no', 'pl', 'pt-PT', + 'ru', 'es', 'sv', 'tr'. + :type default_language_code: str or ~search_service_client.models.SentimentSkillLanguage + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[Union[str, "SentimentSkillLanguage"]] = None, + **kwargs + ): + super(SentimentSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type = '#Microsoft.Skills.Text.SentimentSkill' + self.default_language_code = default_language_code + + +class ServiceCounters(msrest.serialization.Model): + """Represents service-level resource counters and quotas. + + All required parameters must be populated in order to send to Azure. + + :param document_counter: Required. Total number of documents across all indexes in the service. + :type document_counter: ~search_service_client.models.ResourceCounter + :param index_counter: Required. Total number of indexes. + :type index_counter: ~search_service_client.models.ResourceCounter + :param indexer_counter: Required. Total number of indexers. 
+ :type indexer_counter: ~search_service_client.models.ResourceCounter + :param data_source_counter: Required. Total number of data sources. + :type data_source_counter: ~search_service_client.models.ResourceCounter + :param storage_size_counter: Required. Total size of used storage in bytes. + :type storage_size_counter: ~search_service_client.models.ResourceCounter + :param synonym_map_counter: Required. Total number of synonym maps. + :type synonym_map_counter: ~search_service_client.models.ResourceCounter + :param skillset_counter: Required. Total number of skillsets. + :type skillset_counter: ~search_service_client.models.ResourceCounter + """ + + _validation = { + 'document_counter': {'required': True}, + 'index_counter': {'required': True}, + 'indexer_counter': {'required': True}, + 'data_source_counter': {'required': True}, + 'storage_size_counter': {'required': True}, + 'synonym_map_counter': {'required': True}, + 'skillset_counter': {'required': True}, + } + + _attribute_map = { + 'document_counter': {'key': 'documentCount', 'type': 'ResourceCounter'}, + 'index_counter': {'key': 'indexesCount', 'type': 'ResourceCounter'}, + 'indexer_counter': {'key': 'indexersCount', 'type': 'ResourceCounter'}, + 'data_source_counter': {'key': 'dataSourcesCount', 'type': 'ResourceCounter'}, + 'storage_size_counter': {'key': 'storageSize', 'type': 'ResourceCounter'}, + 'synonym_map_counter': {'key': 'synonymMaps', 'type': 'ResourceCounter'}, + 'skillset_counter': {'key': 'skillsetCount', 'type': 'ResourceCounter'}, + } + + def __init__( + self, + *, + document_counter: "ResourceCounter", + index_counter: "ResourceCounter", + indexer_counter: "ResourceCounter", + data_source_counter: "ResourceCounter", + storage_size_counter: "ResourceCounter", + synonym_map_counter: "ResourceCounter", + skillset_counter: "ResourceCounter", + **kwargs + ): + super(ServiceCounters, self).__init__(**kwargs) + self.document_counter = document_counter + self.index_counter = index_counter + 
self.indexer_counter = indexer_counter + self.data_source_counter = data_source_counter + self.storage_size_counter = storage_size_counter + self.synonym_map_counter = synonym_map_counter + self.skillset_counter = skillset_counter + + +class ServiceLimits(msrest.serialization.Model): + """Represents various service level limits. + + :param max_fields_per_index: The maximum allowed fields per index. + :type max_fields_per_index: int + :param max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in an + index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. + :type max_field_nesting_depth_per_index: int + :param max_complex_collection_fields_per_index: The maximum number of fields of type + Collection(Edm.ComplexType) allowed in an index. + :type max_complex_collection_fields_per_index: int + :param max_complex_objects_in_collections_per_document: The maximum number of objects in + complex collections allowed per document. + :type max_complex_objects_in_collections_per_document: int + """ + + _attribute_map = { + 'max_fields_per_index': {'key': 'maxFieldsPerIndex', 'type': 'int'}, + 'max_field_nesting_depth_per_index': {'key': 'maxFieldNestingDepthPerIndex', 'type': 'int'}, + 'max_complex_collection_fields_per_index': {'key': 'maxComplexCollectionFieldsPerIndex', 'type': 'int'}, + 'max_complex_objects_in_collections_per_document': {'key': 'maxComplexObjectsInCollectionsPerDocument', 'type': 'int'}, + } + + def __init__( + self, + *, + max_fields_per_index: Optional[int] = None, + max_field_nesting_depth_per_index: Optional[int] = None, + max_complex_collection_fields_per_index: Optional[int] = None, + max_complex_objects_in_collections_per_document: Optional[int] = None, + **kwargs + ): + super(ServiceLimits, self).__init__(**kwargs) + self.max_fields_per_index = max_fields_per_index + self.max_field_nesting_depth_per_index = max_field_nesting_depth_per_index + 
self.max_complex_collection_fields_per_index = max_complex_collection_fields_per_index + self.max_complex_objects_in_collections_per_document = max_complex_objects_in_collections_per_document + + +class ServiceStatistics(msrest.serialization.Model): + """Response from a get service statistics request. If successful, it includes service level counters and limits. + + All required parameters must be populated in order to send to Azure. + + :param counters: Required. Service level resource counters. + :type counters: ~search_service_client.models.ServiceCounters + :param limits: Required. Service level general limits. + :type limits: ~search_service_client.models.ServiceLimits + """ + + _validation = { + 'counters': {'required': True}, + 'limits': {'required': True}, + } + + _attribute_map = { + 'counters': {'key': 'counters', 'type': 'ServiceCounters'}, + 'limits': {'key': 'limits', 'type': 'ServiceLimits'}, + } + + def __init__( + self, + *, + counters: "ServiceCounters", + limits: "ServiceLimits", + **kwargs + ): + super(ServiceStatistics, self).__init__(**kwargs) + self.counters = counters + self.limits = limits + + +class ShaperSkill(Skill): + """A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). 
The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + **kwargs + ): + super(ShaperSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type = '#Microsoft.Skills.Util.ShaperSkill' + + +class ShingleTokenFilter(TokenFilter): + """Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param max_shingle_size: The maximum shingle size. 
Default and minimum value is 2. + :type max_shingle_size: int + :param min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be less + than the value of maxShingleSize. + :type min_shingle_size: int + :param output_unigrams: A value indicating whether the output stream will contain the input + tokens (unigrams) as well as shingles. Default is true. + :type output_unigrams: bool + :param output_unigrams_if_no_shingles: A value indicating whether to output unigrams for those + times when no shingles are available. This property takes precedence when outputUnigrams is set + to false. Default is false. + :type output_unigrams_if_no_shingles: bool + :param token_separator: The string to use when joining adjacent tokens to form a shingle. + Default is a single space (" "). + :type token_separator: str + :param filter_token: The string to insert for each position at which there is no token. Default + is an underscore ("_"). + :type filter_token: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_shingle_size': {'minimum': 2}, + 'min_shingle_size': {'minimum': 2}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_shingle_size': {'key': 'maxShingleSize', 'type': 'int'}, + 'min_shingle_size': {'key': 'minShingleSize', 'type': 'int'}, + 'output_unigrams': {'key': 'outputUnigrams', 'type': 'bool'}, + 'output_unigrams_if_no_shingles': {'key': 'outputUnigramsIfNoShingles', 'type': 'bool'}, + 'token_separator': {'key': 'tokenSeparator', 'type': 'str'}, + 'filter_token': {'key': 'filterToken', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + max_shingle_size: Optional[int] = 2, + min_shingle_size: Optional[int] = 2, + output_unigrams: Optional[bool] = True, + output_unigrams_if_no_shingles: Optional[bool] = False, + token_separator: Optional[str] = " ", + filter_token: Optional[str] = "_", + **kwargs + ): 
+ super(ShingleTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.ShingleTokenFilter' + self.max_shingle_size = max_shingle_size + self.min_shingle_size = min_shingle_size + self.output_unigrams = output_unigrams + self.output_unigrams_if_no_shingles = output_unigrams_if_no_shingles + self.token_separator = token_separator + self.filter_token = filter_token + + +class Skillset(msrest.serialization.Model): + """A list of skills. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the skillset. + :type name: str + :param description: Required. The description of the skillset. + :type description: str + :param skills: Required. A list of skills in the skillset. + :type skills: list[~search_service_client.models.Skill] + :param cognitive_services_account: Details about cognitive services to be used when running + skills. + :type cognitive_services_account: ~search_service_client.models.CognitiveServicesAccount + :param e_tag: The ETag of the skillset. 
+ :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'description': {'required': True}, + 'skills': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'skills': {'key': 'skills', 'type': '[Skill]'}, + 'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + description: str, + skills: List["Skill"], + cognitive_services_account: Optional["CognitiveServicesAccount"] = None, + e_tag: Optional[str] = None, + **kwargs + ): + super(Skillset, self).__init__(**kwargs) + self.name = name + self.description = description + self.skills = skills + self.cognitive_services_account = cognitive_services_account + self.e_tag = e_tag + + +class SnowballTokenFilter(TokenFilter): + """A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param language: Required. The language to use. Possible values include: 'armenian', 'basque', + 'catalan', 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'german2', 'hungarian', + 'italian', 'kp', 'lovins', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian', + 'spanish', 'swedish', 'turkish'. 
+ :type language: str or ~search_service_client.models.SnowballTokenFilterLanguage + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'language': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'language': {'key': 'language', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + language: Union[str, "SnowballTokenFilterLanguage"], + **kwargs + ): + super(SnowballTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' + self.language = language + + +class SoftDeleteColumnDeletionDetectionPolicy(DataDeletionDetectionPolicy): + """Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param soft_delete_column_name: The name of the column to use for soft-deletion detection. + :type soft_delete_column_name: str + :param soft_delete_marker_value: The marker value that identifies an item as deleted. 
+ :type soft_delete_marker_value: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'soft_delete_column_name': {'key': 'softDeleteColumnName', 'type': 'str'}, + 'soft_delete_marker_value': {'key': 'softDeleteMarkerValue', 'type': 'str'}, + } + + def __init__( + self, + *, + soft_delete_column_name: Optional[str] = None, + soft_delete_marker_value: Optional[str] = None, + **kwargs + ): + super(SoftDeleteColumnDeletionDetectionPolicy, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' + self.soft_delete_column_name = soft_delete_column_name + self.soft_delete_marker_value = soft_delete_marker_value + + +class SplitSkill(Skill): + """A skill to split a string into chunks of text. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. 
+ :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param default_language_code: A value indicating which language code to use. Default is en. + Possible values include: 'da', 'de', 'en', 'es', 'fi', 'fr', 'it', 'ko', 'pt'. + :type default_language_code: str or ~search_service_client.models.SplitSkillLanguage + :param text_split_mode: A value indicating which split mode to perform. Possible values + include: 'pages', 'sentences'. + :type text_split_mode: str or ~search_service_client.models.TextSplitMode + :param maximum_page_length: The desired maximum page length. Default is 10000. + :type maximum_page_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'inputs': {'required': True}, + 'outputs': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'context': {'key': 'context', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'}, + 'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'}, + 'default_language_code': {'key': 'defaultLanguageCode', 'type': 'str'}, + 'text_split_mode': {'key': 'textSplitMode', 'type': 'str'}, + 'maximum_page_length': {'key': 'maximumPageLength', 'type': 'int'}, + } + + def __init__( + self, + *, + inputs: List["InputFieldMappingEntry"], + outputs: List["OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[Union[str, "SplitSkillLanguage"]] = None, + text_split_mode: Optional[Union[str, "TextSplitMode"]] = None, + maximum_page_length: Optional[int] = None, + **kwargs + ): + super(SplitSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) + self.odata_type = '#Microsoft.Skills.Text.SplitSkill' + self.default_language_code = 
default_language_code + self.text_split_mode = text_split_mode + self.maximum_page_length = maximum_page_length + + +class SqlIntegratedChangeTrackingPolicy(DataChangeDetectionPolicy): + """Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SqlIntegratedChangeTrackingPolicy, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' + + +class StandardAnalyzer(Analyzer): + """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :type max_token_length: int + :param stopwords: A list of stopwords. 
+ :type stopwords: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + 'stopwords': {'key': 'stopwords', 'type': '[str]'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 255, + stopwords: Optional[List[str]] = None, + **kwargs + ): + super(StandardAnalyzer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' + self.max_token_length = max_token_length + self.stopwords = stopwords + + +class StandardTokenizer(Tokenizer): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. 
+ :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 255, + **kwargs + ): + super(StandardTokenizer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' + self.max_token_length = max_token_length + + +class StandardTokenizerV2(Tokenizer): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. 
+ :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 255, + **kwargs + ): + super(StandardTokenizerV2, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' + self.max_token_length = max_token_length + + +class StemmerOverrideTokenFilter(TokenFilter): + """Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param rules: Required. A list of stemming rules in the following format: "word => stem", for + example: "ran => run". 
+ :type rules: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'rules': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'rules': {'key': 'rules', 'type': '[str]'}, + } + + def __init__( + self, + *, + name: str, + rules: List[str], + **kwargs + ): + super(StemmerOverrideTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' + self.rules = rules + + +class StemmerTokenFilter(TokenFilter): + """Language specific stemming filter. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param language: Required. The language to use. Possible values include: 'arabic', 'armenian', + 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'dutchKp', + 'english', 'lightEnglish', 'minimalEnglish', 'possessiveEnglish', 'porter2', 'lovins', + 'finnish', 'lightFinnish', 'french', 'lightFrench', 'minimalFrench', 'galician', + 'minimalGalician', 'german', 'german2', 'lightGerman', 'minimalGerman', 'greek', 'hindi', + 'hungarian', 'lightHungarian', 'indonesian', 'irish', 'italian', 'lightItalian', 'sorani', + 'latvian', 'norwegian', 'lightNorwegian', 'minimalNorwegian', 'lightNynorsk', 'minimalNynorsk', + 'portuguese', 'lightPortuguese', 'minimalPortuguese', 'portugueseRslp', 'romanian', 'russian', + 'lightRussian', 'spanish', 'lightSpanish', 'swedish', 'lightSwedish', 'turkish'. 
+ :type language: str or ~search_service_client.models.StemmerTokenFilterLanguage + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'language': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'language': {'key': 'language', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + language: Union[str, "StemmerTokenFilterLanguage"], + **kwargs + ): + super(StemmerTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' + self.language = language + + +class StopAnalyzer(Analyzer): + """Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param stopwords: A list of stopwords. + :type stopwords: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'stopwords': {'key': 'stopwords', 'type': '[str]'}, + } + + def __init__( + self, + *, + name: str, + stopwords: Optional[List[str]] = None, + **kwargs + ): + super(StopAnalyzer, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.StopAnalyzer' + self.stopwords = stopwords + + +class StopwordsTokenFilter(TokenFilter): + """Removes stop words from a token stream. This token filter is implemented using Apache Lucene. 
+ + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param stopwords: The list of stopwords. This property and the stopwords list property cannot + both be set. + :type stopwords: list[str] + :param stopwords_list: A predefined list of stopwords to use. This property and the stopwords + property cannot both be set. Default is English. Possible values include: 'arabic', 'armenian', + 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'english', + 'finnish', 'french', 'galician', 'german', 'greek', 'hindi', 'hungarian', 'indonesian', + 'irish', 'italian', 'latvian', 'norwegian', 'persian', 'portuguese', 'romanian', 'russian', + 'sorani', 'spanish', 'swedish', 'thai', 'turkish'. + :type stopwords_list: str or ~search_service_client.models.StopwordsList + :param ignore_case: A value indicating whether to ignore case. If true, all words are converted + to lower case first. Default is false. + :type ignore_case: bool + :param remove_trailing_stop_words: A value indicating whether to ignore the last search term if + it's a stop word. Default is true. 
+ :type remove_trailing_stop_words: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'stopwords': {'key': 'stopwords', 'type': '[str]'}, + 'stopwords_list': {'key': 'stopwordsList', 'type': 'str'}, + 'ignore_case': {'key': 'ignoreCase', 'type': 'bool'}, + 'remove_trailing_stop_words': {'key': 'removeTrailing', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + stopwords: Optional[List[str]] = None, + stopwords_list: Optional[Union[str, "StopwordsList"]] = None, + ignore_case: Optional[bool] = False, + remove_trailing_stop_words: Optional[bool] = True, + **kwargs + ): + super(StopwordsTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.StopwordsTokenFilter' + self.stopwords = stopwords + self.stopwords_list = stopwords_list + self.ignore_case = ignore_case + self.remove_trailing_stop_words = remove_trailing_stop_words + + +class Suggester(msrest.serialization.Model): + """Defines how the Suggest API should apply to a group of fields in the index. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the suggester. + :type name: str + :ivar search_mode: Required. A value indicating the capabilities of the suggester. Default + value: "analyzingInfixMatching". + :vartype search_mode: str + :param source_fields: Required. The list of field names to which the suggester applies. Each + field must be searchable. 
+ :type source_fields: list[str] + """ + + _validation = { + 'name': {'required': True}, + 'search_mode': {'required': True, 'constant': True}, + 'source_fields': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'search_mode': {'key': 'searchMode', 'type': 'str'}, + 'source_fields': {'key': 'sourceFields', 'type': '[str]'}, + } + + search_mode = "analyzingInfixMatching" + + def __init__( + self, + *, + name: str, + source_fields: List[str], + **kwargs + ): + super(Suggester, self).__init__(**kwargs) + self.name = name + self.source_fields = source_fields + + +class SynonymMap(msrest.serialization.Model): + """Represents a synonym map definition. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the synonym map. + :type name: str + :ivar format: Required. The format of the synonym map. Only the 'solr' format is currently + supported. Default value: "solr". + :vartype format: str + :param synonyms: Required. A series of synonym rules in the specified synonym map format. The + rules must be separated by newlines. + :type synonyms: str + :param encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. 
+ :type encryption_key: ~search_service_client.models.EncryptionKey + :param e_tag: The ETag of the synonym map. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'format': {'required': True, 'constant': True}, + 'synonyms': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'format': {'key': 'format', 'type': 'str'}, + 'synonyms': {'key': 'synonyms', 'type': 'str'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + format = "solr" + + def __init__( + self, + *, + name: str, + synonyms: str, + encryption_key: Optional["EncryptionKey"] = None, + e_tag: Optional[str] = None, + **kwargs + ): + super(SynonymMap, self).__init__(**kwargs) + self.name = name + self.synonyms = synonyms + self.encryption_key = encryption_key + self.e_tag = e_tag + + +class SynonymTokenFilter(TokenFilter): + """Matches single or multi-word synonyms in a token stream. This token filter is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param synonyms: Required. A list of synonyms in following one of two formats: 1. incredible, + unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced + with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma + separated list of equivalent words. Set the expand option to change how this list is + interpreted. + :type synonyms: list[str] + :param ignore_case: A value indicating whether to case-fold input for matching. Default is + false. 
+ :type ignore_case: bool + :param expand: A value indicating whether all words in the list of synonyms (if => notation is + not used) will map to one another. If true, all words in the list of synonyms (if => notation + is not used) will map to one another. The following list: incredible, unbelievable, fabulous, + amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, + unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, + fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => + incredible. Default is true. + :type expand: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'synonyms': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'synonyms': {'key': 'synonyms', 'type': '[str]'}, + 'ignore_case': {'key': 'ignoreCase', 'type': 'bool'}, + 'expand': {'key': 'expand', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + synonyms: List[str], + ignore_case: Optional[bool] = False, + expand: Optional[bool] = True, + **kwargs + ): + super(SynonymTokenFilter, self).__init__(name=name, **kwargs) + self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter' + self.synonyms = synonyms + self.ignore_case = ignore_case + self.expand = expand + + +class TagScoringFunction(ScoringFunction): + """Defines a function that boosts scores of documents with string values matching a given list of tags. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. + :type type: str + :param field_name: Required. The name of the field used as input to the scoring function. + :type field_name: str + :param boost: Required. A multiplier for the raw score. Must be a positive number not equal to + 1.0. 
+ :type boost: float + :param interpolation: A value indicating how boosting will be interpolated across document + scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', + 'logarithmic'. + :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + :param parameters: Required. Parameter values for the tag scoring function. + :type parameters: ~search_service_client.models.TagScoringParameters + """ + + _validation = { + 'type': {'required': True}, + 'field_name': {'required': True}, + 'boost': {'required': True}, + 'parameters': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'field_name': {'key': 'fieldName', 'type': 'str'}, + 'boost': {'key': 'boost', 'type': 'float'}, + 'interpolation': {'key': 'interpolation', 'type': 'str'}, + 'parameters': {'key': 'tag', 'type': 'TagScoringParameters'}, + } + + def __init__( + self, + *, + field_name: str, + boost: float, + parameters: "TagScoringParameters", + interpolation: Optional[Union[str, "ScoringFunctionInterpolation"]] = None, + **kwargs + ): + super(TagScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) + self.type = 'tag' + self.parameters = parameters + + +class TagScoringParameters(msrest.serialization.Model): + """Provides parameter values to a tag scoring function. + + All required parameters must be populated in order to send to Azure. + + :param tags_parameter: Required. The name of the parameter passed in search queries to specify + the list of tags to compare against the target field. 
+ :type tags_parameter: str + """ + + _validation = { + 'tags_parameter': {'required': True}, + } + + _attribute_map = { + 'tags_parameter': {'key': 'tagsParameter', 'type': 'str'}, + } + + def __init__( + self, + *, + tags_parameter: str, + **kwargs + ): + super(TagScoringParameters, self).__init__(**kwargs) + self.tags_parameter = tags_parameter + + +class TextTranslationSkill(Skill): + """A skill to translate text from one language to another. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :type name: str + :param description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :type description: str + :param context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :type context: str + :param inputs: Required. Inputs of the skills could be a column in the source data set, or the + output of an upstream skill. + :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :param outputs: Required. The output of a skill is either a field in a search index, or a value + that can be consumed as an input by another skill. + :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :param default_to_language_code: Required. The language code to translate documents into for + documents that don't specify the to language explicitly. 
Possible values include: 'af', 'ar', + 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', + 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', + 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', + 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', + 'vi', 'cy', 'yua'. + :type default_to_language_code: str or + ~search_service_client.models.TextTranslationSkillLanguage + :param default_from_language_code: The language code to translate documents from for documents + that don't specify the from language explicitly. Possible values include: 'af', 'ar', 'bn', + 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', 'fil', + 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', 'tlh', + 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', 'sr-Cyrl', + 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', 'vi', 'cy', + 'yua'. + :type default_from_language_code: str or + ~search_service_client.models.TextTranslationSkillLanguage + :param suggested_from: The language code to translate documents from when neither the + fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the + automatic language detection is unsuccessful. Default is en. Possible values include: 'af', + 'ar', 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', + 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', + 'sw', 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', + 'sm', 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', + 'ur', 'vi', 'cy', 'yua'. 
    :type suggested_from: str or ~search_service_client.models.TextTranslationSkillLanguage
    """

    # msrest metadata (generated): which fields are required on the wire, and how each
    # Python attribute maps to its REST JSON name and serialized type.
    _validation = {
        'odata_type': {'required': True},
        'inputs': {'required': True},
        'outputs': {'required': True},
        'default_to_language_code': {'required': True},
    }

    # NOTE(review): the '\\.' in '@odata\\.type' escapes the dot so msrest does not
    # treat it as a nested-property path — confirm against msrest serialization docs.
    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'context': {'key': 'context', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
        'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
        'default_to_language_code': {'key': 'defaultToLanguageCode', 'type': 'str'},
        'default_from_language_code': {'key': 'defaultFromLanguageCode', 'type': 'str'},
        'suggested_from': {'key': 'suggestedFrom', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        inputs: List["InputFieldMappingEntry"],
        outputs: List["OutputFieldMappingEntry"],
        default_to_language_code: Union[str, "TextTranslationSkillLanguage"],
        name: Optional[str] = None,
        description: Optional[str] = None,
        context: Optional[str] = None,
        default_from_language_code: Optional[Union[str, "TextTranslationSkillLanguage"]] = None,
        suggested_from: Optional[Union[str, "TextTranslationSkillLanguage"]] = None,
        **kwargs
    ):
        super(TextTranslationSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
        # Discriminator constant identifying this skill type to the service.
        self.odata_type = '#Microsoft.Skills.Text.TranslationSkill'
        self.default_to_language_code = default_to_language_code
        self.default_from_language_code = default_from_language_code
        self.suggested_from = suggested_from


class TextWeights(msrest.serialization.Model):
    """Defines weights on index fields for which matches should boost scoring in search queries.

    All required parameters must be populated in order to send to Azure.

    :param weights: Required. The dictionary of per-field weights to boost document scoring. The
     keys are field names and the values are the weights for each field.
    :type weights: dict[str, float]
    """

    _validation = {
        'weights': {'required': True},
    }

    _attribute_map = {
        'weights': {'key': 'weights', 'type': '{float}'},
    }

    def __init__(
        self,
        *,
        weights: Dict[str, float],
        **kwargs
    ):
        super(TextWeights, self).__init__(**kwargs)
        self.weights = weights


class TokenInfo(msrest.serialization.Model):
    """Information about a token returned by an analyzer.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar token: Required. The token returned by the analyzer.
    :vartype token: str
    :ivar start_offset: Required. The index of the first character of the token in the input text.
    :vartype start_offset: int
    :ivar end_offset: Required. The index of the last character of the token in the input text.
    :vartype end_offset: int
    :ivar position: Required. The position of the token in the input text relative to other tokens.
     The first token in the input text has position 0, the next has position 1, and so on. Depending
     on the analyzer used, some tokens might have the same position, for example if they are
     synonyms of each other.
    :vartype position: int
    """

    _validation = {
        'token': {'required': True, 'readonly': True},
        'start_offset': {'required': True, 'readonly': True},
        'end_offset': {'required': True, 'readonly': True},
        'position': {'required': True, 'readonly': True},
    }

    _attribute_map = {
        'token': {'key': 'token', 'type': 'str'},
        'start_offset': {'key': 'startOffset', 'type': 'int'},
        'end_offset': {'key': 'endOffset', 'type': 'int'},
        'position': {'key': 'position', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(TokenInfo, self).__init__(**kwargs)
        # All fields are readonly: initialized to None here and populated by the
        # service on deserialization; client-assigned values are never sent.
        self.token = None
        self.start_offset = None
        self.end_offset = None
        self.position = None


class TruncateTokenFilter(TokenFilter):
    """Truncates the terms to a specific length. This token filter is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the token filter. It must only contain letters, digits,
     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
     limited to 128 characters.
    :type name: str
    :param length: The length at which terms will be truncated. Default and maximum is 300.
    :type length: int
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'length': {'maximum': 300},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'length': {'key': 'length', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        name: str,
        length: Optional[int] = 300,
        **kwargs
    ):
        super(TruncateTokenFilter, self).__init__(name=name, **kwargs)
        self.odata_type = '#Microsoft.Azure.Search.TruncateTokenFilter'
        self.length = length


class UaxUrlEmailTokenizer(Tokenizer):
    """Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
     dashes or underscores, can only start and end with alphanumeric characters, and is limited to
     128 characters.
    :type name: str
    :param max_token_length: The maximum token length. Default is 255. Tokens longer than the
     maximum length are split. The maximum token length that can be used is 300 characters.
    :type max_token_length: int
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
        'max_token_length': {'maximum': 300},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        name: str,
        max_token_length: Optional[int] = 255,
        **kwargs
    ):
        super(UaxUrlEmailTokenizer, self).__init__(name=name, **kwargs)
        self.odata_type = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer'
        self.max_token_length = max_token_length


class UniqueTokenFilter(TokenFilter):
    """Filters out tokens with same text as the previous token. This token filter is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the token filter. It must only contain letters, digits,
     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
     limited to 128 characters.
    :type name: str
    :param only_on_same_position: A value indicating whether to remove duplicates only at the same
     position. Default is false.
    :type only_on_same_position: bool
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'only_on_same_position': {'key': 'onlyOnSamePosition', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        name: str,
        only_on_same_position: Optional[bool] = False,
        **kwargs
    ):
        super(UniqueTokenFilter, self).__init__(name=name, **kwargs)
        self.odata_type = '#Microsoft.Azure.Search.UniqueTokenFilter'
        self.only_on_same_position = only_on_same_position


class WebApiSkill(Skill):
    """A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: The name of the skill which uniquely identifies it within the skillset. A skill
     with no name defined will be given a default name of its 1-based index in the skills array,
     prefixed with the character '#'.
    :type name: str
    :param description: The description of the skill which describes the inputs, outputs, and usage
     of the skill.
    :type description: str
    :param context: Represents the level at which operations take place, such as the document root
     or document content (for example, /document or /document/content). The default is /document.
    :type context: str
    :param inputs: Required. Inputs of the skills could be a column in the source data set, or the
     output of an upstream skill.
    :type inputs: list[~search_service_client.models.InputFieldMappingEntry]
    :param outputs: Required. The output of a skill is either a field in a search index, or a value
     that can be consumed as an input by another skill.
    :type outputs: list[~search_service_client.models.OutputFieldMappingEntry]
    :param uri: Required. The url for the Web API.
    :type uri: str
    :param http_headers: The headers required to make the http request.
    :type http_headers: dict[str, str]
    :param http_method: The method for the http request.
    :type http_method: str
    :param timeout: The desired timeout for the request. Default is 30 seconds.
    :type timeout: ~datetime.timedelta
    :param batch_size: The desired batch size which indicates number of documents.
    :type batch_size: int
    :param degree_of_parallelism: If set, the number of parallel calls that can be made to the Web
     API.
    :type degree_of_parallelism: int
    """

    _validation = {
        'odata_type': {'required': True},
        'inputs': {'required': True},
        'outputs': {'required': True},
        'uri': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'context': {'key': 'context', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[InputFieldMappingEntry]'},
        'outputs': {'key': 'outputs', 'type': '[OutputFieldMappingEntry]'},
        'uri': {'key': 'uri', 'type': 'str'},
        'http_headers': {'key': 'httpHeaders', 'type': '{str}'},
        'http_method': {'key': 'httpMethod', 'type': 'str'},
        'timeout': {'key': 'timeout', 'type': 'duration'},
        'batch_size': {'key': 'batchSize', 'type': 'int'},
        'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        inputs: List["InputFieldMappingEntry"],
        outputs: List["OutputFieldMappingEntry"],
        uri: str,
        name: Optional[str] = None,
        description: Optional[str] = None,
        context: Optional[str] = None,
        http_headers: Optional[Dict[str, str]] = None,
        http_method: Optional[str] = None,
        timeout: Optional[datetime.timedelta] = None,
        batch_size: Optional[int] = None,
        degree_of_parallelism: Optional[int] = None,
        **kwargs
    ):
        super(WebApiSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs)
        self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill'
        self.uri = uri
        self.http_headers = http_headers
        self.http_method = http_method
        self.timeout = timeout
        self.batch_size = batch_size
        self.degree_of_parallelism = degree_of_parallelism


class WordDelimiterTokenFilter(TokenFilter):
    """Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene.

    All required parameters must be populated in order to send to Azure.

    :param odata_type: Required. Constant filled by server.
    :type odata_type: str
    :param name: Required. The name of the token filter. It must only contain letters, digits,
     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
     limited to 128 characters.
    :type name: str
    :param generate_word_parts: A value indicating whether to generate part words. If set, causes
     parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is
     true.
    :type generate_word_parts: bool
    :param generate_number_parts: A value indicating whether to generate number subwords. Default
     is true.
    :type generate_number_parts: bool
    :param catenate_words: A value indicating whether maximum runs of word parts will be catenated.
     For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false.
    :type catenate_words: bool
    :param catenate_numbers: A value indicating whether maximum runs of number parts will be
     catenated. For example, if this is set to true, "1-2" becomes "12". Default is false.
    :type catenate_numbers: bool
    :param catenate_all: A value indicating whether all subword parts will be catenated. For
     example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false.
    :type catenate_all: bool
    :param split_on_case_change: A value indicating whether to split words on caseChange. For
     example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true.
    :type split_on_case_change: bool
    :param preserve_original: A value indicating whether original words will be preserved and added
     to the subword list. Default is false.
    :type preserve_original: bool
    :param split_on_numerics: A value indicating whether to split on numbers. For example, if this
     is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true.
    :type split_on_numerics: bool
    :param stem_english_possessive: A value indicating whether to remove trailing "'s" for each
     subword. Default is true.
    :type stem_english_possessive: bool
    :param protected_words: A list of tokens to protect from being delimited.
    :type protected_words: list[str]
    """

    _validation = {
        'odata_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'generate_word_parts': {'key': 'generateWordParts', 'type': 'bool'},
        'generate_number_parts': {'key': 'generateNumberParts', 'type': 'bool'},
        'catenate_words': {'key': 'catenateWords', 'type': 'bool'},
        'catenate_numbers': {'key': 'catenateNumbers', 'type': 'bool'},
        'catenate_all': {'key': 'catenateAll', 'type': 'bool'},
        'split_on_case_change': {'key': 'splitOnCaseChange', 'type': 'bool'},
        'preserve_original': {'key': 'preserveOriginal', 'type': 'bool'},
        'split_on_numerics': {'key': 'splitOnNumerics', 'type': 'bool'},
        'stem_english_possessive': {'key': 'stemEnglishPossessive', 'type': 'bool'},
        'protected_words': {'key': 'protectedWords', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        name: str,
        generate_word_parts: Optional[bool] = True,
        generate_number_parts: Optional[bool] = True,
        catenate_words: Optional[bool] = False,
        catenate_numbers: Optional[bool] = False,
        catenate_all: Optional[bool] = False,
        split_on_case_change: Optional[bool] = True,
        preserve_original: Optional[bool] = False,
        split_on_numerics: Optional[bool] = True,
        stem_english_possessive: Optional[bool] = True,
        protected_words: Optional[List[str]] = None,
        **kwargs
    ):
        super(WordDelimiterTokenFilter, self).__init__(name=name, **kwargs)
        self.odata_type = '#Microsoft.Azure.Search.WordDelimiterTokenFilter'
        self.generate_word_parts = generate_word_parts
        self.generate_number_parts = generate_number_parts
        self.catenate_words = catenate_words
        self.catenate_numbers = catenate_numbers
        self.catenate_all = catenate_all
        self.split_on_case_change = split_on_case_change
        self.preserve_original = preserve_original
        self.split_on_numerics = split_on_numerics
        self.stem_english_possessive = stem_english_possessive
        self.protected_words = protected_words
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py
new file mode 100644
index 0000000000000..137ae7fcaef2c
--- /dev/null
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py
@@ -0,0 +1,734 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# NOTE(review): "{generator}" in the banner above looks like an unexpanded
# template placeholder from the code generator — confirm against the autorest
# configuration; do not hand-edit this generated file.

from enum import Enum

class DataSourceType(str, Enum):
    """Defines the type of a datasource.
    """

    azure_sql = "azuresql"
    cosmos_db = "cosmosdb"
    azure_blob = "azureblob"
    azure_table = "azuretable"
    my_sql = "mysql"

class IndexerExecutionStatus(str, Enum):
    """Represents the status of an individual indexer execution.
    """

    transient_failure = "transientFailure"
    success = "success"
    in_progress = "inProgress"
    reset = "reset"

class DataType(str, Enum):
    """Defines the data type of a field in a search index.
    """

    edm_string = "Edm.String"
    edm_int32 = "Edm.Int32"
    edm_int64 = "Edm.Int64"
    edm_double = "Edm.Double"
    edm_boolean = "Edm.Boolean"
    edm_date_time_offset = "Edm.DateTimeOffset"
    edm_geography_point = "Edm.GeographyPoint"
    edm_complex_type = "Edm.ComplexType"

class AnalyzerName(str, Enum):
    """Defines the names of all text analyzers supported by Azure Cognitive Search.
    """

    ar_microsoft = "ar.microsoft"
    ar_lucene = "ar.lucene"
    hy_lucene = "hy.lucene"
    bn_microsoft = "bn.microsoft"
    eu_lucene = "eu.lucene"
    bg_microsoft = "bg.microsoft"
    bg_lucene = "bg.lucene"
    ca_microsoft = "ca.microsoft"
    ca_lucene = "ca.lucene"
    zh_hans_microsoft = "zh-Hans.microsoft"
    zh_hans_lucene = "zh-Hans.lucene"
    zh_hant_microsoft = "zh-Hant.microsoft"
    zh_hant_lucene = "zh-Hant.lucene"
    hr_microsoft = "hr.microsoft"
    cs_microsoft = "cs.microsoft"
    cs_lucene = "cs.lucene"
    da_microsoft = "da.microsoft"
    da_lucene = "da.lucene"
    nl_microsoft = "nl.microsoft"
    nl_lucene = "nl.lucene"
    en_microsoft = "en.microsoft"
    en_lucene = "en.lucene"
    et_microsoft = "et.microsoft"
    fi_microsoft = "fi.microsoft"
    fi_lucene = "fi.lucene"
    fr_microsoft = "fr.microsoft"
    fr_lucene = "fr.lucene"
    gl_lucene = "gl.lucene"
    de_microsoft = "de.microsoft"
    de_lucene = "de.lucene"
    el_microsoft = "el.microsoft"
    el_lucene = "el.lucene"
    gu_microsoft = "gu.microsoft"
    he_microsoft = "he.microsoft"
    hi_microsoft = "hi.microsoft"
    hi_lucene = "hi.lucene"
    hu_microsoft = "hu.microsoft"
    hu_lucene = "hu.lucene"
    is_microsoft = "is.microsoft"
    id_microsoft = "id.microsoft"
    id_lucene = "id.lucene"
    ga_lucene = "ga.lucene"
    it_microsoft = "it.microsoft"
    it_lucene = "it.lucene"
    ja_microsoft = "ja.microsoft"
    ja_lucene = "ja.lucene"
    kn_microsoft = "kn.microsoft"
    ko_microsoft = "ko.microsoft"
    ko_lucene = "ko.lucene"
    lv_microsoft = "lv.microsoft"
    lv_lucene = "lv.lucene"
    lt_microsoft = "lt.microsoft"
    ml_microsoft = "ml.microsoft"
    ms_microsoft = "ms.microsoft"
    mr_microsoft = "mr.microsoft"
    nb_microsoft = "nb.microsoft"
    no_lucene = "no.lucene"
    fa_lucene = "fa.lucene"
    pl_microsoft = "pl.microsoft"
    pl_lucene = "pl.lucene"
    pt_br_microsoft = "pt-BR.microsoft"
    pt_br_lucene = "pt-BR.lucene"
    pt_microsoft = "pt-PT.microsoft"
    pt_lucene = "pt-PT.lucene"
    pa_microsoft = "pa.microsoft"
    ro_microsoft = "ro.microsoft"
    ro_lucene = "ro.lucene"
    ru_microsoft = "ru.microsoft"
    ru_lucene = "ru.lucene"
    sr_cyrillic_microsoft = "sr-cyrillic.microsoft"
    sr_latin_microsoft = "sr-latin.microsoft"
    sk_microsoft = "sk.microsoft"
    sl_microsoft = "sl.microsoft"
    es_microsoft = "es.microsoft"
    es_lucene = "es.lucene"
    sv_microsoft = "sv.microsoft"
    sv_lucene = "sv.lucene"
    ta_microsoft = "ta.microsoft"
    te_microsoft = "te.microsoft"
    th_microsoft = "th.microsoft"
    th_lucene = "th.lucene"
    tr_microsoft = "tr.microsoft"
    tr_lucene = "tr.lucene"
    uk_microsoft = "uk.microsoft"
    ur_microsoft = "ur.microsoft"
    vi_microsoft = "vi.microsoft"
    standard_lucene = "standard.lucene"
    standardasciifolding_lucene = "standardasciifolding.lucene"
    keyword = "keyword"
    pattern = "pattern"
    simple = "simple"
    stop = "stop"
    whitespace = "whitespace"

class ScoringFunctionInterpolation(str, Enum):
    """Defines the function used to interpolate score boosting across a range of documents.
    """

    linear = "linear"
    constant = "constant"
    quadratic = "quadratic"
    logarithmic = "logarithmic"

class ScoringFunctionAggregation(str, Enum):
    """Defines the aggregation function used to combine the results of all the scoring functions in a
    scoring profile.
    """

    sum = "sum"
    average = "average"
    minimum = "minimum"
    maximum = "maximum"
    first_matching = "firstMatching"

class TokenFilterName(str, Enum):
    """Defines the names of all token filters supported by Azure Cognitive Search.
    """

    arabic_normalization = "arabic_normalization" #: A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html.
    apostrophe = "apostrophe" #: Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html.
    ascii_folding = "asciifolding" #: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html.
    cjk_bigram = "cjk_bigram" #: Forms bigrams of CJK terms that are generated from StandardTokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html.
    cjk_width = "cjk_width" #: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html.
    classic = "classic" #: Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html.
    common_gram = "common_grams" #: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html.
    edge_n_gram = "edgeNGram_v2" #: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html.
    elision = "elision" #: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html.
    german_normalization = "german_normalization" #: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html.
    hindi_normalization = "hindi_normalization" #: Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html.
    indic_normalization = "indic_normalization" #: Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html.
    keyword_repeat = "keyword_repeat" #: Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html.
    k_stem = "kstem" #: A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html.
    length = "length" #: Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html.
    limit = "limit" #: Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html.
    lowercase = "lowercase" #: Normalizes token text to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html.
    n_gram = "nGram_v2" #: Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html.
    persian_normalization = "persian_normalization" #: Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html.
    phonetic = "phonetic" #: Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html.
    porter_stem = "porter_stem" #: Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer.
    reverse = "reverse" #: Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html.
    scandinavian_normalization = "scandinavian_normalization" #: Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html.
    scandinavian_folding_normalization = "scandinavian_folding" #: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html.
    shingle = "shingle" #: Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html.
    snowball = "snowball" #: A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html.
    sorani_normalization = "sorani_normalization" #: Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html.
    stemmer = "stemmer" #: Language specific stemming filter. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters.
    stopwords = "stopwords" #: Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html.
    trim = "trim" #: Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html.
    truncate = "truncate" #: Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html.
    unique = "unique" #: Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html.
    uppercase = "uppercase" #: Normalizes token text to upper case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html.
    word_delimiter = "word_delimiter" #: Splits words into subwords and performs optional transformations on subword groups.

class TokenCharacterKind(str, Enum):
    """Represents classes of characters on which a token filter can operate.
    """

    letter = "letter"
    digit = "digit"
    whitespace = "whitespace"
    punctuation = "punctuation"
    symbol = "symbol"

class CjkBigramTokenFilterScripts(str, Enum):
    """Scripts that can be ignored by CjkBigramTokenFilter.
    """

    han = "han"
    hiragana = "hiragana"
    katakana = "katakana"
    hangul = "hangul"

class VisualFeature(str, Enum):
    """The strings indicating what visual feature types to return.
    """

    adult = "adult"
    brands = "brands"
    categories = "categories"
    description = "description"
    faces = "faces"
    objects = "objects"
    tags = "tags"

class ImageDetail(str, Enum):
    """A string indicating which domain-specific details to return.
    """

    celebrities = "celebrities"
    landmarks = "landmarks"

class EntityCategory(str, Enum):
    """A string indicating what entity categories to return.
    """

    location = "location"
    organization = "organization"
    person = "person"
    quantity = "quantity"
    datetime = "datetime"
    url = "url"
    email = "email"

class TokenizerName(str, Enum):
    """Defines the names of all tokenizers supported by Azure Cognitive Search.
    """

    classic = "classic" #: Grammar-based tokenizer that is suitable for processing most European-language documents. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html.
    edge_n_gram = "edgeNGram" #: Tokenizes the input from an edge into n-grams of the given size(s). See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html.
    keyword = "keyword_v2" #: Emits the entire input as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html.
    letter = "letter" #: Divides text at non-letters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html.
    lowercase = "lowercase" #: Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html.
    microsoft_language_tokenizer = "microsoft_language_tokenizer" #: Divides text using language-specific rules.
    microsoft_language_stemming_tokenizer = "microsoft_language_stemming_tokenizer" #: Divides text using language-specific rules and reduces words to their base forms.
    n_gram = "nGram" #: Tokenizes the input into n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html.
    path_hierarchy = "path_hierarchy_v2" #: Tokenizer for path-like hierarchies. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html.
    pattern = "pattern" #: Tokenizer that uses regex pattern matching to construct distinct tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html.
    standard = "standard_v2" #: Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html.
    uax_url_email = "uax_url_email" #: Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html.
    whitespace = "whitespace" #: Divides text at whitespace. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html.

class RegexFlags(str, Enum):
    """Defines flags that can be combined to control how regular expressions are used in the pattern
    analyzer and pattern tokenizer.
    """

    canon_eq = "CANON_EQ"
    case_insensitive = "CASE_INSENSITIVE"
    comments = "COMMENTS"
    dotall = "DOTALL"
    literal = "LITERAL"
    multiline = "MULTILINE"
    unicode_case = "UNICODE_CASE"
    unix_lines = "UNIX_LINES"

class KeyPhraseExtractionSkillLanguage(str, Enum):
    """The language codes supported for input text by KeyPhraseExtractionSkill.
    """

    da = "da" #: Danish.
    nl = "nl" #: Dutch.
    en = "en" #: English.
    fi = "fi" #: Finnish.
    fr = "fr" #: French.
    de = "de" #: German.
    it = "it" #: Italian.
    ja = "ja" #: Japanese.
    ko = "ko" #: Korean.
    no = "no" #: Norwegian (Bokmaal).
    pl = "pl" #: Polish.
    pt = "pt-PT" #: Portuguese (Portugal).
    pt_br = "pt-BR" #: Portuguese (Brazil).
    ru = "ru" #: Russian.
    es = "es" #: Spanish.
    sv = "sv" #: Swedish.

class OcrSkillLanguage(str, Enum):
    """The language codes supported for input by OcrSkill.
    """

    zh_hans = "zh-Hans" #: Chinese-Simplified.
    zh_hant = "zh-Hant" #: Chinese-Traditional.
    cs = "cs" #: Czech.
    da = "da" #: Danish.
    nl = "nl" #: Dutch.
    en = "en" #: English.
    fi = "fi" #: Finnish.
    fr = "fr" #: French.
    de = "de" #: German.
    el = "el" #: Greek.
    hu = "hu" #: Hungarian.
    it = "it" #: Italian.
    ja = "ja" #: Japanese.
    ko = "ko" #: Korean.
    nb = "nb" #: Norwegian (Bokmaal).
    pl = "pl" #: Polish.
    pt = "pt" #: Portuguese.
    ru = "ru" #: Russian.
    es = "es" #: Spanish.
    sv = "sv" #: Swedish.
    tr = "tr" #: Turkish.
    ar = "ar" #: Arabic.
    ro = "ro" #: Romanian.
    sr_cyrl = "sr-Cyrl" #: Serbian (Cyrillic, Serbia).
    sr_latn = "sr-Latn" #: Serbian (Latin, Serbia).
    sk = "sk" #: Slovak.

class ImageAnalysisSkillLanguage(str, Enum):
    """The language codes supported for input by ImageAnalysisSkill.
    """

    en = "en" #: English.
    es = "es" #: Spanish.
    ja = "ja" #: Japanese.
    pt = "pt" #: Portuguese.
    zh = "zh" #: Chinese.

class EntityRecognitionSkillLanguage(str, Enum):
    """The language codes supported for input text by EntityRecognitionSkill.
    """

    ar = "ar" #: Arabic.
    cs = "cs" #: Czech.
    zh_hans = "zh-Hans" #: Chinese-Simplified.
    zh_hant = "zh-Hant" #: Chinese-Traditional.
    da = "da" #: Danish.
    nl = "nl" #: Dutch.
    en = "en" #: English.
    fi = "fi" #: Finnish.
    fr = "fr" #: French.
    de = "de" #: German.
    el = "el" #: Greek.
    hu = "hu" #: Hungarian.
    it = "it" #: Italian.
    ja = "ja" #: Japanese.
    ko = "ko" #: Korean.
    no = "no" #: Norwegian (Bokmaal).
    pl = "pl" #: Polish.
    pt = "pt-PT" #: Portuguese (Portugal).
    pt_br = "pt-BR" #: Portuguese (Brazil).
    ru = "ru" #: Russian.
    es = "es" #: Spanish.
    sv = "sv" #: Swedish.
    tr = "tr" #: Turkish.

class SentimentSkillLanguage(str, Enum):
    """The language codes supported for input text by SentimentSkill.
    """

    da = "da" #: Danish.
    nl = "nl" #: Dutch.
    en = "en" #: English.
    fi = "fi" #: Finnish.
    fr = "fr" #: French.
    de = "de" #: German.
    el = "el" #: Greek.
    it = "it" #: Italian.
    no = "no" #: Norwegian (Bokmaal).
    pl = "pl" #: Polish.
    pt = "pt-PT" #: Portuguese (Portugal).
    ru = "ru" #: Russian.
    es = "es" #: Spanish.
    sv = "sv" #: Swedish.
    tr = "tr" #: Turkish.

class SplitSkillLanguage(str, Enum):
    """The language codes supported for input text by SplitSkill.
    """

    da = "da" #: Danish.
    de = "de" #: German.
    en = "en" #: English.
    es = "es" #: Spanish.
    fi = "fi" #: Finnish.
    fr = "fr" #: French.
    it = "it" #: Italian.
    ko = "ko" #: Korean.
    pt = "pt" #: Portuguese.

class TextTranslationSkillLanguage(str, Enum):
    """The language codes supported for input text by TextTranslationSkill.
    """

    af = "af" #: Afrikaans.
    ar = "ar" #: Arabic.
    bn = "bn" #: Bangla.
    bs = "bs" #: Bosnian (Latin).
    bg = "bg" #: Bulgarian.
    yue = "yue" #: Cantonese (Traditional).
    ca = "ca" #: Catalan.
    zh_hans = "zh-Hans" #: Chinese Simplified.
    zh_hant = "zh-Hant" #: Chinese Traditional.
    hr = "hr" #: Croatian.
    cs = "cs" #: Czech.
    da = "da" #: Danish.
    nl = "nl" #: Dutch.
    en = "en" #: English.
    et = "et" #: Estonian.
    fj = "fj" #: Fijian.
    fil = "fil" #: Filipino.
    fi = "fi" #: Finnish.
    fr = "fr" #: French.
    de = "de" #: German.
    el = "el" #: Greek.
    ht = "ht" #: Haitian Creole.
    he = "he" #: Hebrew.
    hi = "hi" #: Hindi.
    mww = "mww" #: Hmong Daw.
    hu = "hu" #: Hungarian.
    is_enum = "is" #: Icelandic. (Named "is_enum" because "is" is a reserved Python keyword.)
    id = "id" #: Indonesian.
    it = "it" #: Italian.
    ja = "ja" #: Japanese.
    sw = "sw" #: Kiswahili.
    tlh = "tlh" #: Klingon.
    ko = "ko" #: Korean.
    lv = "lv" #: Latvian.
    lt = "lt" #: Lithuanian.
    mg = "mg" #: Malagasy.
    ms = "ms" #: Malay.
    mt = "mt" #: Maltese.
    nb = "nb" #: Norwegian.
    fa = "fa" #: Persian.
    pl = "pl" #: Polish.
    pt = "pt" #: Portuguese.
    otq = "otq" #: Queretaro Otomi.
    ro = "ro" #: Romanian.
    ru = "ru" #: Russian.
    sm = "sm" #: Samoan.
    sr_cyrl = "sr-Cyrl" #: Serbian (Cyrillic).
    sr_latn = "sr-Latn" #: Serbian (Latin).
    sk = "sk" #: Slovak.
    sl = "sl" #: Slovenian.
    es = "es" #: Spanish.
    sv = "sv" #: Swedish.
    ty = "ty" #: Tahitian.
    ta = "ta" #: Tamil.
    te = "te" #: Telugu.
    th = "th" #: Thai.
    to = "to" #: Tongan.
    tr = "tr" #: Turkish.
    uk = "uk" #: Ukrainian.
    ur = "ur" #: Urdu.
    vi = "vi" #: Vietnamese.
    cy = "cy" #: Welsh.
    yua = "yua" #: Yucatec Maya.

class IndexerStatus(str, Enum):
    """Represents the overall indexer status.
    """

    unknown = "unknown"
    error = "error"
    running = "running"

class MicrosoftTokenizerLanguage(str, Enum):
    """Lists the languages supported by the Microsoft language tokenizer.
+ """ + + bangla = "bangla" + bulgarian = "bulgarian" + catalan = "catalan" + chinese_simplified = "chineseSimplified" + chinese_traditional = "chineseTraditional" + croatian = "croatian" + czech = "czech" + danish = "danish" + dutch = "dutch" + english = "english" + french = "french" + german = "german" + greek = "greek" + gujarati = "gujarati" + hindi = "hindi" + icelandic = "icelandic" + indonesian = "indonesian" + italian = "italian" + japanese = "japanese" + kannada = "kannada" + korean = "korean" + malay = "malay" + malayalam = "malayalam" + marathi = "marathi" + norwegian_bokmaal = "norwegianBokmaal" + polish = "polish" + portuguese = "portuguese" + portuguese_brazilian = "portugueseBrazilian" + punjabi = "punjabi" + romanian = "romanian" + russian = "russian" + serbian_cyrillic = "serbianCyrillic" + serbian_latin = "serbianLatin" + slovenian = "slovenian" + spanish = "spanish" + swedish = "swedish" + tamil = "tamil" + telugu = "telugu" + thai = "thai" + ukrainian = "ukrainian" + urdu = "urdu" + vietnamese = "vietnamese" + +class MicrosoftStemmingTokenizerLanguage(str, Enum): + """Lists the languages supported by the Microsoft language stemming tokenizer. 
+ """ + + arabic = "arabic" + bangla = "bangla" + bulgarian = "bulgarian" + catalan = "catalan" + croatian = "croatian" + czech = "czech" + danish = "danish" + dutch = "dutch" + english = "english" + estonian = "estonian" + finnish = "finnish" + french = "french" + german = "german" + greek = "greek" + gujarati = "gujarati" + hebrew = "hebrew" + hindi = "hindi" + hungarian = "hungarian" + icelandic = "icelandic" + indonesian = "indonesian" + italian = "italian" + kannada = "kannada" + latvian = "latvian" + lithuanian = "lithuanian" + malay = "malay" + malayalam = "malayalam" + marathi = "marathi" + norwegian_bokmaal = "norwegianBokmaal" + polish = "polish" + portuguese = "portuguese" + portuguese_brazilian = "portugueseBrazilian" + punjabi = "punjabi" + romanian = "romanian" + russian = "russian" + serbian_cyrillic = "serbianCyrillic" + serbian_latin = "serbianLatin" + slovak = "slovak" + slovenian = "slovenian" + spanish = "spanish" + swedish = "swedish" + tamil = "tamil" + telugu = "telugu" + turkish = "turkish" + ukrainian = "ukrainian" + urdu = "urdu" + +class EdgeNGramTokenFilterSide(str, Enum): + """Specifies which side of the input an n-gram should be generated from. + """ + + front = "front" + back = "back" + +class PhoneticEncoder(str, Enum): + """Identifies the type of phonetic encoder to use with a PhoneticTokenFilter. + """ + + metaphone = "metaphone" + double_metaphone = "doubleMetaphone" + soundex = "soundex" + refined_soundex = "refinedSoundex" + caverphone1 = "caverphone1" + caverphone2 = "caverphone2" + cologne = "cologne" + nysiis = "nysiis" + koelner_phonetik = "koelnerPhonetik" + haase_phonetik = "haasePhonetik" + beider_morse = "beiderMorse" + +class SnowballTokenFilterLanguage(str, Enum): + """The language to use for a Snowball token filter. 
+ """ + + armenian = "armenian" + basque = "basque" + catalan = "catalan" + danish = "danish" + dutch = "dutch" + english = "english" + finnish = "finnish" + french = "french" + german = "german" + german2 = "german2" + hungarian = "hungarian" + italian = "italian" + kp = "kp" + lovins = "lovins" + norwegian = "norwegian" + porter = "porter" + portuguese = "portuguese" + romanian = "romanian" + russian = "russian" + spanish = "spanish" + swedish = "swedish" + turkish = "turkish" + +class StemmerTokenFilterLanguage(str, Enum): + """The language to use for a stemmer token filter. + """ + + arabic = "arabic" + armenian = "armenian" + basque = "basque" + brazilian = "brazilian" + bulgarian = "bulgarian" + catalan = "catalan" + czech = "czech" + danish = "danish" + dutch = "dutch" + dutch_kp = "dutchKp" + english = "english" + light_english = "lightEnglish" + minimal_english = "minimalEnglish" + possessive_english = "possessiveEnglish" + porter2 = "porter2" + lovins = "lovins" + finnish = "finnish" + light_finnish = "lightFinnish" + french = "french" + light_french = "lightFrench" + minimal_french = "minimalFrench" + galician = "galician" + minimal_galician = "minimalGalician" + german = "german" + german2 = "german2" + light_german = "lightGerman" + minimal_german = "minimalGerman" + greek = "greek" + hindi = "hindi" + hungarian = "hungarian" + light_hungarian = "lightHungarian" + indonesian = "indonesian" + irish = "irish" + italian = "italian" + light_italian = "lightItalian" + sorani = "sorani" + latvian = "latvian" + norwegian = "norwegian" + light_norwegian = "lightNorwegian" + minimal_norwegian = "minimalNorwegian" + light_nynorsk = "lightNynorsk" + minimal_nynorsk = "minimalNynorsk" + portuguese = "portuguese" + light_portuguese = "lightPortuguese" + minimal_portuguese = "minimalPortuguese" + portuguese_rslp = "portugueseRslp" + romanian = "romanian" + russian = "russian" + light_russian = "lightRussian" + spanish = "spanish" + light_spanish = "lightSpanish" + 
swedish = "swedish" + light_swedish = "lightSwedish" + turkish = "turkish" + +class StopwordsList(str, Enum): + """Identifies a predefined list of language-specific stopwords. + """ + + arabic = "arabic" + armenian = "armenian" + basque = "basque" + brazilian = "brazilian" + bulgarian = "bulgarian" + catalan = "catalan" + czech = "czech" + danish = "danish" + dutch = "dutch" + english = "english" + finnish = "finnish" + french = "french" + galician = "galician" + german = "german" + greek = "greek" + hindi = "hindi" + hungarian = "hungarian" + indonesian = "indonesian" + irish = "irish" + italian = "italian" + latvian = "latvian" + norwegian = "norwegian" + persian = "persian" + portuguese = "portuguese" + romanian = "romanian" + russian = "russian" + sorani = "sorani" + spanish = "spanish" + swedish = "swedish" + thai = "thai" + turkish = "turkish" + +class TextExtractionAlgorithm(str, Enum): + """A value indicating which algorithm to use. Default is printed. + """ + + printed = "printed" + handwritten = "handwritten" + +class TextSplitMode(str, Enum): + """A value indicating which split mode to perform. + """ + + pages = "pages" + sentences = "sentences" diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py new file mode 100644 index 0000000000000..d87e3cc4debbd --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._data_sources_operations import DataSourcesOperations +from ._indexers_operations import IndexersOperations +from ._skillsets_operations import SkillsetsOperations +from ._synonym_maps_operations import SynonymMapsOperations +from ._indexes_operations import IndexesOperations +from ._search_service_client_operations import SearchServiceClientOperationsMixin + +__all__ = [ + 'DataSourcesOperations', + 'IndexersOperations', + 'SkillsetsOperations', + 'SynonymMapsOperations', + 'IndexesOperations', + 'SearchServiceClientOperationsMixin', +] diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py new file mode 100644 index 0000000000000..7ba6982701ccf --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py @@ -0,0 +1,390 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class DataSourcesOperations(object): + """DataSourcesOperations operations. 
+ + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~search_service_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create_or_update( + self, + data_source_name, # type: str + data_source, # type: "models.DataSource" + request_options=None, # type: Optional["models.RequestOptions"] + access_condition=None, # type: Optional["models.AccessCondition"] + **kwargs # type: Any + ): + # type: (...) -> "models.DataSource" + """Creates a new datasource or updates a datasource if it already exists. + + :param data_source_name: The name of the datasource to create or update. + :type data_source_name: str + :param data_source: The definition of the datasource to create or update. + :type data_source: ~search_service_client.models.DataSource + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. 
+ :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DataSource or the result of cls(response) + :rtype: ~search_service_client.models.DataSource or ~search_service_client.models.DataSource + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + prefer = "return=representation" + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create_or_update.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 
'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(data_source, 'DataSource') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('DataSource', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('DataSource', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create_or_update.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + + def delete( + self, + data_source_name, # type: str + request_options=None, # type: Optional["models.RequestOptions"] + access_condition=None, # type: Optional["models.AccessCondition"] + **kwargs # type: Any + ): + # type: (...) -> None + """Deletes a datasource. + + :param data_source_name: The name of the datasource to delete. + :type data_source_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. 
+ :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [204, 404]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + + def get( + self, + data_source_name, # type: str + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.DataSource" + """Retrieves a datasource definition. + + :param data_source_name: The name of the datasource to retrieve. + :type data_source_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DataSource or the result of cls(response) + :rtype: ~search_service_client.models.DataSource + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: 
Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DataSource', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + + def list( + self, + select=None, # type: Optional[str] + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.ListDataSourcesResult" + """Lists all datasources available for a search service. + + :param select: Selects which top-level properties of the data sources to retrieve. Specified as + a comma-separated list of JSON property names, or '*' for all properties. The default is all + properties. + :type select: str + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListDataSourcesResult or the result of cls(response) + :rtype: ~search_service_client.models.ListDataSourcesResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ListDataSourcesResult"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + 
deserialized = self._deserialize('ListDataSourcesResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/datasources'} + + def create( + self, + data_source, # type: "models.DataSource" + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.DataSource" + """Creates a new datasource. + + :param data_source: The definition of the datasource to create. + :type data_source: ~search_service_client.models.DataSource + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DataSource or the result of cls(response) + :rtype: ~search_service_client.models.DataSource + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + 
header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(data_source, 'DataSource') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('DataSource', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create.metadata = {'url': '/datasources'} diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py new file mode 100644 index 0000000000000..38ed69e0fb8d2 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py @@ -0,0 +1,568 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class IndexersOperations(object): + """IndexersOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~search_service_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def reset( + self, + indexer_name, # type: str + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Resets the change tracking state associated with an indexer. + + :param indexer_name: The name of the indexer to reset. + :type indexer_name: str + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.reset.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + reset.metadata = {'url': '/indexers(\'{indexerName}\')/search.reset'} + + def run( + self, + indexer_name, # type: str + 
request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Runs an indexer on-demand. + + :param indexer_name: The name of the indexer to run. + :type indexer_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.run.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize(models.SearchError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ run.metadata = {'url': '/indexers(\'{indexerName}\')/search.run'}
+
+ def create_or_update(
+ self,
+ indexer_name, # type: str
+ indexer, # type: "models.Indexer"
+ request_options=None, # type: Optional["models.RequestOptions"]
+ access_condition=None, # type: Optional["models.AccessCondition"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "models.Indexer"
+ """Creates a new indexer or updates an indexer if it already exists.
+
+ :param indexer_name: The name of the indexer to create or update.
+ :type indexer_name: str
+ :param indexer: The definition of the indexer to create or update.
+ :type indexer: ~search_service_client.models.Indexer
+ :param request_options: Parameter group.
+ :type request_options: ~search_service_client.models.RequestOptions
+ :param access_condition: Parameter group.
+ :type access_condition: ~search_service_client.models.AccessCondition
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: Indexer or the result of cls(response)
+ :rtype: ~search_service_client.models.Indexer
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"]
+ error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+
+ _x_ms_client_request_id = None
+ _if_match = None
+ _if_none_match = None
+ if access_condition is not None:
+ _if_match = access_condition.if_match
+ _if_none_match = access_condition.if_none_match
+ if request_options is not None:
+ _x_ms_client_request_id = request_options.x_ms_client_request_id
+ prefer = "return=representation"
+ api_version = "2019-05-06-Preview"
+
+ # Construct URL
+ url = self.create_or_update.metadata['url']
+ path_format_arguments = {
+ 'endpoint': 
self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(indexer, 'Indexer') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('Indexer', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('Indexer', pipeline_response) + + 
if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create_or_update.metadata = {'url': '/indexers(\'{indexerName}\')'} + + def delete( + self, + indexer_name, # type: str + request_options=None, # type: Optional["models.RequestOptions"] + access_condition=None, # type: Optional["models.AccessCondition"] + **kwargs # type: Any + ): + # type: (...) -> None + """Deletes an indexer. + + :param indexer_name: The name of the indexer to delete. + :type indexer_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. + :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, 
Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/indexers(\'{indexerName}\')'} + + def get( + self, + indexer_name, # type: str + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.Indexer" + """Retrieves an indexer definition. + + :param indexer_name: The name of the indexer to retrieve. + :type indexer_name: str + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Indexer or the result of cls(response) + :rtype: ~search_service_client.models.Indexer + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('Indexer', pipeline_response) + + if cls: + 
return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/indexers(\'{indexerName}\')'} + + def list( + self, + select=None, # type: Optional[str] + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.ListIndexersResult" + """Lists all indexers available for a search service. + + :param select: Selects which top-level properties of the indexers to retrieve. Specified as a + comma-separated list of JSON property names, or '*' for all properties. The default is all + properties. + :type select: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListIndexersResult or the result of cls(response) + :rtype: ~search_service_client.models.ListIndexersResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ListIndexersResult"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + 
header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('ListIndexersResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/indexers'} + + def create( + self, + indexer, # type: "models.Indexer" + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.Indexer" + """Creates a new indexer. + + :param indexer: The definition of the indexer to create. + :type indexer: ~search_service_client.models.Indexer + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Indexer or the result of cls(response) + :rtype: ~search_service_client.models.Indexer + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(indexer, 'Indexer') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + 
error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('Indexer', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create.metadata = {'url': '/indexers'} + + def get_status( + self, + indexer_name, # type: str + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.IndexerExecutionInfo" + """Returns the current status and execution history of an indexer. + + :param indexer_name: The name of the indexer for which to retrieve status. + :type indexer_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IndexerExecutionInfo or the result of cls(response) + :rtype: ~search_service_client.models.IndexerExecutionInfo + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.IndexerExecutionInfo"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.get_status.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if 
_x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('IndexerExecutionInfo', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_status.metadata = {'url': '/indexers(\'{indexerName}\')/search.status'} diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py new file mode 100644 index 0000000000000..74d5ef6e01cb8 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py @@ -0,0 +1,531 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class IndexesOperations(object): + """IndexesOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~search_service_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + index, # type: "models.Index" + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.Index" + """Creates a new search index. + + :param index: The definition of the index to create. + :type index: ~search_service_client.models.Index + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Index or the result of cls(response) + :rtype: ~search_service_client.models.Index + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(index, 'Index') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('Index', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create.metadata = {'url': '/indexes'} + + def list( + self, + select=None, # type: Optional[str] + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.ListIndexesResult" + """Lists all indexes available for a search service. + + :param select: Selects which top-level properties of the index definitions to retrieve. + Specified as a comma-separated list of JSON property names, or '*' for all properties. The + default is all properties. + :type select: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListIndexesResult or the result of cls(response) + :rtype: ~search_service_client.models.ListIndexesResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ListIndexesResult"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = 
self._serialize.query("api_version", api_version, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if _x_ms_client_request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
+ header_parameters['Accept'] = 'application/json'
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize(models.SearchError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ deserialized = self._deserialize('ListIndexesResult', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+ list.metadata = {'url': '/indexes'}
+
+ def create_or_update(
+ self,
+ index_name, # type: str
+ index, # type: "models.Index"
+ allow_index_downtime=None, # type: Optional[bool]
+ request_options=None, # type: Optional["models.RequestOptions"]
+ access_condition=None, # type: Optional["models.AccessCondition"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "models.Index"
+ """Creates a new search index or updates an index if it already exists.
+
+ :param index_name: The name of the index to create or update.
+ :type index_name: str
+ :param index: The definition of the index to create or update.
+ :type index: ~search_service_client.models.Index
+ :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters
+ to be added to an index by taking the index offline for at least a few seconds. This
+ temporarily causes indexing and query requests to fail. 
Performance and write availability of
+ the index can be impaired for several minutes after the index is updated, or longer for very
+ large indexes.
+ :type allow_index_downtime: bool
+ :param request_options: Parameter group.
+ :type request_options: ~search_service_client.models.RequestOptions
+ :param access_condition: Parameter group.
+ :type access_condition: ~search_service_client.models.AccessCondition
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: Index or the result of cls(response)
+ :rtype: ~search_service_client.models.Index
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["models.Index"]
+ error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+
+ _x_ms_client_request_id = None
+ _if_match = None
+ _if_none_match = None
+ if access_condition is not None:
+ _if_match = access_condition.if_match
+ _if_none_match = access_condition.if_none_match
+ if request_options is not None:
+ _x_ms_client_request_id = request_options.x_ms_client_request_id
+ prefer = "return=representation"
+ api_version = "2019-05-06-Preview"
+
+ # Construct URL
+ url = self.create_or_update.metadata['url']
+ path_format_arguments = {
+ 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
+ 'indexName': self._serialize.url("index_name", index_name, 'str'),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if allow_index_downtime is not None:
+ query_parameters['allowIndexDowntime'] = self._serialize.query("allow_index_downtime", allow_index_downtime, 'bool')
+ query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if _x_ms_client_request_id is not 
None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(index, 'Index') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('Index', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('Index', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create_or_update.metadata = {'url': '/indexes(\'{indexName}\')'} + + def delete( + self, + index_name, # type: str + request_options=None, # type: Optional["models.RequestOptions"] + access_condition=None, # type: Optional["models.AccessCondition"] + **kwargs # type: Any + ): + # type: (...) -> None + """Deletes a search index and all the documents it contains. + + :param index_name: The name of the index to delete. 
+ :type index_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. + :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexName': self._serialize.url("index_name", index_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + + # Construct and send request + request = 
    def get(
        self,
        index_name,  # type: str
        request_options=None,  # type: Optional["models.RequestOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.Index"
        """Retrieves an index definition.

        .. note:: AutoRest-generated operation; edits are lost on regeneration.

        :param index_name: The name of the index to retrieve.
        :type index_name: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Index or the result of cls(response)
        :rtype: ~search_service_client.models.Index
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Index"]
        # Callers may override the default status-code -> exception mapping via 'error_map'.
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the optional parameter group into the individual header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'indexName': self._serialize.url("index_name", index_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('Index', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/indexes(\'{indexName}\')'}
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: GetIndexStatisticsResult or the result of cls(response) + :rtype: ~search_service_client.models.GetIndexStatisticsResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.GetIndexStatisticsResult"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.get_statistics.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'indexName': self._serialize.url("index_name", index_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = 
    def analyze(
        self,
        index_name,  # type: str
        # NOTE(review): 'request_todo' looks like a code-generator artifact
        # (presumably to avoid clashing with the pipeline 'request' local);
        # renaming it would break keyword callers, so it is kept as-is.
        request_todo,  # type: "models.AnalyzeRequest"
        request_options=None,  # type: Optional["models.RequestOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.AnalyzeResult"
        """Shows how an analyzer breaks text into tokens.

        .. note:: AutoRest-generated operation; edits are lost on regeneration.

        :param index_name: The name of the index for which to test an analyzer.
        :type index_name: str
        :param request_todo: The text and analyzer or analysis components to test.
        :type request_todo: ~search_service_client.models.AnalyzeRequest
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AnalyzeResult or the result of cls(response)
        :rtype: ~search_service_client.models.AnalyzeResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.AnalyzeResult"]
        # Callers may override the default status-code -> exception mapping via 'error_map'.
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the optional parameter group into the individual header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.analyze.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'indexName': self._serialize.url("index_name", index_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(request_todo, 'AnalyzeRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('AnalyzeResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    analyze.metadata = {'url': '/indexes(\'{indexName}\')/search.analyze'}
class SearchServiceClientOperationsMixin(object):
    # Mixin: mixed into the generated service client. It references
    # self._client / self._config / self._serialize / self._deserialize
    # without defining them -- presumably supplied by the host client
    # class (TODO confirm against the generated client definition).

    def get_service_statistics(
        self,
        request_options=None,  # type: Optional["models.RequestOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.ServiceStatistics"
        """Gets service level statistics for a search service.

        .. note:: AutoRest-generated operation; edits are lost on regeneration.

        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServiceStatistics or the result of cls(response)
        :rtype: ~search_service_client.models.ServiceStatistics
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ServiceStatistics"]
        # Callers may override the default status-code -> exception mapping via 'error_map'.
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the optional parameter group into the individual header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.get_service_statistics.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ServiceStatistics', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_service_statistics.metadata = {'url': '/servicestats'}
class SkillsetsOperations(object):
    """SkillsetsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    .. note:: AutoRest-generated operation group; edits are lost on regeneration.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~search_service_client.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def create_or_update(
        self,
        skillset_name,  # type: str
        skillset,  # type: "models.Skillset"
        request_options=None,  # type: Optional["models.RequestOptions"]
        access_condition=None,  # type: Optional["models.AccessCondition"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.Skillset"
        """Creates a new skillset in a search service or updates the skillset if it already exists.

        :param skillset_name: The name of the skillset to create or update.
        :type skillset_name: str
        :param skillset: The skillset containing one or more skills to create or update in a search
         service.
        :type skillset: ~search_service_client.models.Skillset
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :param access_condition: Parameter group.
        :type access_condition: ~search_service_client.models.AccessCondition
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Skillset or the result of cls(response)
        :rtype: ~search_service_client.models.Skillset
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Skillset"]
        # Callers may override the default status-code -> exception mapping via 'error_map'.
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the optional parameter groups into individual header values.
        _x_ms_client_request_id = None
        _if_match = None
        _if_none_match = None
        if access_condition is not None:
            _if_match = access_condition.if_match
            _if_none_match = access_condition.if_none_match
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        # 'Prefer: return=representation' asks the service to echo the
        # resource back in the response body (returned on both 200 and 201).
        prefer = "return=representation"
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        if _if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
        if _if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
        header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str')
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(skillset, 'Skillset')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        # 200 = updated, 201 = created; both carry the resulting skillset.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Skillset', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Skillset', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {'url': '/skillsets(\'{skillsetName}\')'}

    def delete(
        self,
        skillset_name,  # type: str
        request_options=None,  # type: Optional["models.RequestOptions"]
        access_condition=None,  # type: Optional["models.AccessCondition"]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Deletes a skillset in a search service.

        :param skillset_name: The name of the skillset to delete.
        :type skillset_name: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :param access_condition: Parameter group.
        :type access_condition: ~search_service_client.models.AccessCondition
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Callers may override the default status-code -> exception mapping via 'error_map'.
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the optional parameter groups into individual header values.
        _x_ms_client_request_id = None
        _if_match = None
        _if_none_match = None
        if access_condition is not None:
            _if_match = access_condition.if_match
            _if_none_match = access_condition.if_none_match
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        if _if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
        if _if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # NOTE(review): 404 is accepted as success, so deleting a missing
        # skillset does not raise -- delete is effectively idempotent.
        if response.status_code not in [204, 404]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/skillsets(\'{skillsetName}\')'}

    def get(
        self,
        skillset_name,  # type: str
        request_options=None,  # type: Optional["models.RequestOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.Skillset"
        """Retrieves a skillset in a search service.

        :param skillset_name: The name of the skillset to retrieve.
        :type skillset_name: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Skillset or the result of cls(response)
        :rtype: ~search_service_client.models.Skillset
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Skillset"]
        # Callers may override the default status-code -> exception mapping via 'error_map'.
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the optional parameter group into the individual header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('Skillset', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/skillsets(\'{skillsetName}\')'}

    def list(
        self,
        select=None,  # type: Optional[str]
        request_options=None,  # type: Optional["models.RequestOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.ListSkillsetsResult"
        """List all skillsets in a search service.

        :param select: Selects which top-level properties of the skillsets to retrieve. Specified as a
         comma-separated list of JSON property names, or '*' for all properties. The default is all
         properties.
        :type select: str
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ListSkillsetsResult or the result of cls(response)
        :rtype: ~search_service_client.models.ListSkillsetsResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ListSkillsetsResult"]
        # Callers may override the default status-code -> exception mapping via 'error_map'.
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the optional parameter group into the individual header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.list.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, 'str')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ListSkillsetsResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list.metadata = {'url': '/skillsets'}

    def create(
        self,
        skillset,  # type: "models.Skillset"
        request_options=None,  # type: Optional["models.RequestOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.Skillset"
        """Creates a new skillset in a search service.

        :param skillset: The skillset containing one or more skills to create in a search service.
        :type skillset: ~search_service_client.models.Skillset
        :param request_options: Parameter group.
        :type request_options: ~search_service_client.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Skillset or the result of cls(response)
        :rtype: ~search_service_client.models.Skillset
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Skillset"]
        # Callers may override the default status-code -> exception mapping via 'error_map'.
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

        # Flatten the optional parameter group into the individual header value.
        _x_ms_client_request_id = None
        if request_options is not None:
            _x_ms_client_request_id = request_options.x_ms_client_request_id
        api_version = "2019-05-06-Preview"

        # Construct URL
        url = self.create.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _x_ms_client_request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(skillset, 'Skillset')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Unlike create_or_update, plain create only ever yields 201 Created.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.SearchError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('Skillset', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create.metadata = {'url': '/skillsets'}
+# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class SynonymMapsOperations(object): + """SynonymMapsOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~search_service_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create_or_update( + self, + synonym_map_name, # type: str + synonym_map, # type: "models.SynonymMap" + request_options=None, # type: Optional["models.RequestOptions"] + access_condition=None, # type: Optional["models.AccessCondition"] + **kwargs # type: Any + ): + # type: (...) -> "models.SynonymMap" + """Creates a new synonym map or updates a synonym map if it already exists. + + :param synonym_map_name: The name of the synonym map to create or update. + :type synonym_map_name: str + :param synonym_map: The definition of the synonym map to create or update. + :type synonym_map: ~search_service_client.models.SynonymMap + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. + :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SynonymMap or the result of cls(response) + :rtype: ~search_service_client.models.SynonymMap or ~search_service_client.models.SynonymMap + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + prefer = "return=representation" + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create_or_update.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = 
self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(synonym_map, 'SynonymMap') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SynonymMap', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('SynonymMap', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create_or_update.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + + def delete( + self, + synonym_map_name, # type: str + request_options=None, # type: Optional["models.RequestOptions"] + access_condition=None, # type: Optional["models.AccessCondition"] + **kwargs # type: Any + ): + # type: (...) -> None + """Deletes a synonym map. + + :param synonym_map_name: The name of the synonym map to delete. + :type synonym_map_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :param access_condition: Parameter group. 
+ :type access_condition: ~search_service_client.models.AccessCondition + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + _if_match = None + _if_none_match = None + if access_condition is not None: + _if_match = access_condition.if_match + _if_none_match = access_condition.if_none_match + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [204, 404]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) + + delete.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + + def get( + self, + synonym_map_name, # type: str + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.SynonymMap" + """Retrieves a synonym map definition. + + :param synonym_map_name: The name of the synonym map to retrieve. + :type synonym_map_name: str + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SynonymMap or the result of cls(response) + :rtype: ~search_service_client.models.SynonymMap + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: 
Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('SynonymMap', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + + def list( + self, + select=None, # type: Optional[str] + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.ListSynonymMapsResult" + """Lists all synonym maps available for a search service. + + :param select: Selects which top-level properties of the synonym maps to retrieve. Specified as + a comma-separated list of JSON property names, or '*' for all properties. The default is all + properties. + :type select: str + :param request_options: Parameter group. 
+ :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListSynonymMapsResult or the result of cls(response) + :rtype: ~search_service_client.models.ListSynonymMapsResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ListSynonymMapsResult"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + 
deserialized = self._deserialize('ListSynonymMapsResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + list.metadata = {'url': '/synonymmaps'} + + def create( + self, + synonym_map, # type: "models.SynonymMap" + request_options=None, # type: Optional["models.RequestOptions"] + **kwargs # type: Any + ): + # type: (...) -> "models.SynonymMap" + """Creates a new synonym map. + + :param synonym_map: The definition of the synonym map to create. + :type synonym_map: ~search_service_client.models.SynonymMap + :param request_options: Parameter group. + :type request_options: ~search_service_client.models.RequestOptions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SynonymMap or the result of cls(response) + :rtype: ~search_service_client.models.SynonymMap + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + + _x_ms_client_request_id = None + if request_options is not None: + _x_ms_client_request_id = request_options.x_ms_client_request_id + api_version = "2019-05-06-Preview" + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _x_ms_client_request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Accept'] = 'application/json' + 
header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(synonym_map, 'SynonymMap') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('SynonymMap', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + create.metadata = {'url': '/synonymmaps'} diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/py.typed b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/py.typed new file mode 100644 index 0000000000000..e5aff4f83af86 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_search_service_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_search_service_client.py new file mode 100644 index 0000000000000..214d5925357e0 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_search_service_client.py @@ -0,0 +1,82 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING + +from azure.core.pipeline.policies import HeadersPolicy +from azure.core.tracing.decorator import distributed_trace +from ._generated import SearchServiceClient as _SearchServiceClient +from .._version import VERSION + +if TYPE_CHECKING: + # pylint:disable=unused-import,ungrouped-imports + from typing import Any, Union + from .. import SearchApiKeyCredential + + +class SearchServiceClient(object): + """A client to interact with an existing Azure search service. + + :param endpoint: The URL endpoint of an Azure search service + :type endpoint: str + :param credential: A credential to authorize search client requests + :type credential: SearchApiKeyCredential + + .. admonition:: Example: + + .. literalinclude:: ../samples/sample_authentication.py + :start-after: [START create_search_service_client_with_key] + :end-before: [END create_search_service_client_with_key] + :language: python + :dedent: 4 + :caption: Creating the SearchServiceClient with an API key. + """ + + def __init__(self, endpoint, credential, **kwargs): + # type: (str, SearchApiKeyCredential, **Any) -> None + + headers_policy = HeadersPolicy( + { + "api-key": credential.api_key, + "Accept": "application/json;odata.metadata=minimal", + } + ) + + self._endpoint = endpoint # type: str + self._client = _SearchServiceClient( + endpoint=endpoint, + headers_policy=headers_policy, + sdk_moniker="search/{}".format(VERSION), + **kwargs + ) # type: _SearchServiceClient + + def __repr__(self): + # type: () -> str + return "".format(repr(self._endpoint))[:1024] + + @distributed_trace + def get_service_statistics(self, **kwargs): + # type: (**Any) -> dict + """Get service level statistics for a search service. 
+ + """ + result = self._client.get_service_statistics(**kwargs) + return result.as_dict() + + def close(self): + # type: () -> None + """Close the :class:`~azure.search.SearchServiceClient` session. + + """ + return self._client.close() + + def __enter__(self): + # type: () -> SearchServiceClient + self._client.__enter__() # pylint:disable=no-member + return self + + def __exit__(self, *args): + # type: (*Any) -> None + self._client.__exit__(*args) # pylint:disable=no-member diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/__init__.py new file mode 100644 index 0000000000000..0d3142f14092a --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/__init__.py @@ -0,0 +1,7 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +from ._search_service_client_async import SearchServiceClient + +__all__ = "SearchServiceClient" diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_search_service_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_search_service_client_async.py new file mode 100644 index 0000000000000..7998dfce3257a --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_search_service_client_async.py @@ -0,0 +1,74 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING + +from azure.core.pipeline.policies import HeadersPolicy +from azure.core.tracing.decorator_async import distributed_trace_async +from .._generated.aio import SearchServiceClient as _SearchServiceClient +from ..._version import VERSION + +if TYPE_CHECKING: + # pylint:disable=unused-import,ungrouped-imports + from typing import Any, Union + from ... import SearchApiKeyCredential + + +class SearchServiceClient(object): + """A client to interact with an existing Azure search service. + + :param endpoint: The URL endpoint of an Azure search service + :type endpoint: str + :param credential: A credential to authorize search client requests + :type credential: SearchApiKeyCredential + + """ + + def __init__(self, endpoint, credential, **kwargs): + # type: (str, SearchApiKeyCredential, **Any) -> None + + headers_policy = HeadersPolicy( + { + "api-key": credential.api_key, + "Accept": "application/json;odata.metadata=minimal", + } + ) + + self._endpoint = endpoint # type: str + self._client = _SearchServiceClient( + endpoint=endpoint, + headers_policy=headers_policy, + sdk_moniker="search/{}".format(VERSION), + **kwargs + ) # type: _SearchServiceClient + + def __repr__(self): + # type: () -> str + return "".format(repr(self._endpoint))[:1024] + + @distributed_trace_async + async def get_service_statistics(self, **kwargs): + # type: (**Any) -> dict + """Get service level statistics for a search service. + + """ + result = await self._client.get_service_statistics(**kwargs) + return result.as_dict() + + async def close(self): + # type: () -> None + """Close the :class:`~azure.search.SearchServiceClient` session. 
+ + """ + return await self._client.close() + + async def __aenter__(self): + # type: () -> SearchServiceClient + await self._client.__aenter__() # pylint:disable=no-member + return self + + async def __aexit__(self, *args): + # type: (*Any) -> None + await self._client.__aexit__(*args) # pylint:disable=no-member diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio.py b/sdk/search/azure-search-documents/azure/search/documents/aio.py index e982779dc5a84..395e4eceb156a 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio.py @@ -25,5 +25,6 @@ # -------------------------------------------------------------------------- from ._index.aio import AsyncSearchItemPaged, SearchIndexClient +from ._service.aio import SearchServiceClient -__all__ = ("AsyncSearchItemPaged", "SearchIndexClient") +__all__ = ("AsyncSearchItemPaged", "SearchIndexClient", "SearchServiceClient") diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_async_get_document_count.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_async_get_document_count.yaml similarity index 73% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_async_get_document_count.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_async_get_document_count.yaml index 7b3b53a59b439..2dad6d34e0d3d 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_async_get_document_count.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_async_get_document_count.yaml @@ -7,9 +7,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - CFF34E79768886C9F7A9C09F2C7BD9C4 + - 3E09B72DB905597AB9127D9DBFBE441E 
method: GET - uri: https://searchaab2129d.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://search22c31514.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF10" @@ -18,17 +18,17 @@ interactions: content-encoding: gzip content-length: '127' content-type: text/plain - date: Mon, 16 Mar 2020 17:49:31 GMT - elapsed-time: '131' + date: Thu, 19 Mar 2020 00:18:31 GMT + elapsed-time: '60' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 7d87c22c-67ae-11ea-9335-8c8590507855 + request-id: 29b3bdde-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchaab2129d.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + url: https://search22c31514.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_autocomplete.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_autocomplete.yaml similarity index 77% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_autocomplete.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_autocomplete.yaml index ce0b029793fa5..554cf9eeae7eb 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_autocomplete.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_autocomplete.yaml @@ -11,9 +11,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - B156DF900D38F51EE39DA36D9D032630 + - 53A9F61B8C003E2D608580D9822FE054 
method: POST - uri: https://searche84d0dac.search.windows.net/indexes('drgqefsg')/docs/search.post.autocomplete?api-version=2019-05-06-Preview + uri: https://search42ca1023.search.windows.net/indexes('drgqefsg')/docs/search.post.autocomplete?api-version=2019-05-06-Preview response: body: string: '{"value":[{"text":"motel","queryPlusText":"motel"}]}' @@ -22,17 +22,17 @@ interactions: content-encoding: gzip content-length: '163' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:49:43 GMT - elapsed-time: '260' + date: Thu, 19 Mar 2020 00:18:41 GMT + elapsed-time: '99' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 84abdd54-67ae-11ea-9335-8c8590507855 + request-id: 3015052a-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searche84d0dac.search.windows.net/indexes('drgqefsg')/docs/search.post.autocomplete?api-version=2019-05-06-Preview + url: https://search42ca1023.search.windows.net/indexes('drgqefsg')/docs/search.post.autocomplete?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_delete_documents_existing.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_delete_documents_existing.yaml similarity index 74% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_delete_documents_existing.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_delete_documents_existing.yaml index 8894d72c7144d..d06b27d49e31c 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_delete_documents_existing.yaml +++ 
b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_delete_documents_existing.yaml @@ -12,9 +12,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 9806A2457E1DCAD1E0DAD1EDCB022C21 + - C70E2AE3883C0E258491FF305B99C0B8 method: POST - uri: https://searchbd291308.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://search37b1157f.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"3","status":true,"errorMessage":null,"statusCode":200},{"key":"4","status":true,"errorMessage":null,"statusCode":200}]}' @@ -23,19 +23,19 @@ interactions: content-encoding: gzip content-length: '190' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:49:55 GMT - elapsed-time: '46' + date: Thu, 19 Mar 2020 00:18:52 GMT + elapsed-time: '110' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 8b737f34-67ae-11ea-9335-8c8590507855 + request-id: 3686dda2-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchbd291308.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + url: https://search37b1157f.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview - request: body: null headers: @@ -44,9 +44,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 9806A2457E1DCAD1E0DAD1EDCB022C21 + - C70E2AE3883C0E258491FF305B99C0B8 method: GET - uri: https://searchbd291308.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: 
https://search37b1157f.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF8" @@ -55,19 +55,19 @@ interactions: content-encoding: gzip content-length: '126' content-type: text/plain - date: Mon, 16 Mar 2020 17:49:58 GMT - elapsed-time: '4' + date: Thu, 19 Mar 2020 00:18:55 GMT + elapsed-time: '3' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 8d558fea-67ae-11ea-9335-8c8590507855 + request-id: 38720c7c-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchbd291308.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + url: https://search37b1157f.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview - request: body: null headers: @@ -76,25 +76,25 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 9806A2457E1DCAD1E0DAD1EDCB022C21 + - C70E2AE3883C0E258491FF305B99C0B8 method: GET - uri: https://searchbd291308.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview + uri: https://search37b1157f.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview response: body: string: '' headers: cache-control: no-cache content-length: '0' - date: Mon, 16 Mar 2020 17:49:58 GMT - elapsed-time: '8' + date: Thu, 19 Mar 2020 00:18:55 GMT + elapsed-time: '4' expires: '-1' pragma: no-cache - request-id: 8d5aeb0c-67ae-11ea-9335-8c8590507855 + request-id: 3876ff66-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains status: code: 404 message: Not Found - url: https://searchbd291308.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview + url: 
https://search37b1157f.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview - request: body: null headers: @@ -103,23 +103,23 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 9806A2457E1DCAD1E0DAD1EDCB022C21 + - C70E2AE3883C0E258491FF305B99C0B8 method: GET - uri: https://searchbd291308.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://search37b1157f.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '' headers: cache-control: no-cache content-length: '0' - date: Mon, 16 Mar 2020 17:49:58 GMT + date: Thu, 19 Mar 2020 00:18:55 GMT elapsed-time: '4' expires: '-1' pragma: no-cache - request-id: 8d60a4d4-67ae-11ea-9335-8c8590507855 + request-id: 387c2b58-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains status: code: 404 message: Not Found - url: https://searchbd291308.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + url: https://search37b1157f.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_delete_documents_missing.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_delete_documents_missing.yaml similarity index 74% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_delete_documents_missing.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_delete_documents_missing.yaml index bd39bdcc112f4..2e5ec8fc20894 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_delete_documents_missing.yaml +++ 
b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_delete_documents_missing.yaml @@ -12,9 +12,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 987215610AC2AEF2F9D9ED9537B44CFA + - F15F3409B0BC9E7D019FF18251E360EF method: POST - uri: https://searchaa131297.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://search2224150e.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"1000","status":true,"errorMessage":null,"statusCode":200},{"key":"4","status":true,"errorMessage":null,"statusCode":200}]}' @@ -23,19 +23,19 @@ interactions: content-encoding: gzip content-length: '193' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:50:10 GMT - elapsed-time: '85' + date: Thu, 19 Mar 2020 00:19:06 GMT + elapsed-time: '82' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 94518df8-67ae-11ea-9335-8c8590507855 + request-id: 3efc1c18-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchaa131297.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + url: https://search2224150e.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview - request: body: null headers: @@ -44,9 +44,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 987215610AC2AEF2F9D9ED9537B44CFA + - F15F3409B0BC9E7D019FF18251E360EF method: GET - uri: https://searchaa131297.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: 
https://search2224150e.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF9" @@ -55,19 +55,19 @@ interactions: content-encoding: gzip content-length: '126' content-type: text/plain - date: Mon, 16 Mar 2020 17:50:13 GMT - elapsed-time: '3' + date: Thu, 19 Mar 2020 00:19:09 GMT + elapsed-time: '4' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 963a5122-67ae-11ea-9335-8c8590507855 + request-id: 40e33f98-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchaa131297.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + url: https://search2224150e.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview - request: body: null headers: @@ -76,25 +76,25 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 987215610AC2AEF2F9D9ED9537B44CFA + - F15F3409B0BC9E7D019FF18251E360EF method: GET - uri: https://searchaa131297.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + uri: https://search2224150e.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview response: body: string: '' headers: cache-control: no-cache content-length: '0' - date: Mon, 16 Mar 2020 17:50:13 GMT - elapsed-time: '4' + date: Thu, 19 Mar 2020 00:19:09 GMT + elapsed-time: '3' expires: '-1' pragma: no-cache - request-id: 963f8aa2-67ae-11ea-9335-8c8590507855 + request-id: 40e86518-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains status: code: 404 message: Not Found - url: https://searchaa131297.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + url: 
https://search2224150e.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview - request: body: null headers: @@ -103,23 +103,23 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 987215610AC2AEF2F9D9ED9537B44CFA + - F15F3409B0BC9E7D019FF18251E360EF method: GET - uri: https://searchaa131297.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://search2224150e.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '' headers: cache-control: no-cache content-length: '0' - date: Mon, 16 Mar 2020 17:50:13 GMT + date: Thu, 19 Mar 2020 00:19:09 GMT elapsed-time: '4' expires: '-1' pragma: no-cache - request-id: 9644970e-67ae-11ea-9335-8c8590507855 + request-id: 40ed3be2-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains status: code: 404 message: Not Found - url: https://searchaa131297.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + url: https://search2224150e.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_document.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_document.yaml similarity index 84% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_document.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_document.yaml index 731479e4f8827..a4aa3a38dcc8a 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_document.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_document.yaml @@ -7,9 +7,9 @@ interactions: User-Agent: 
- azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6A664EE78DD8350E4834FC69AE123F60 + - 794D643BAA2C0B61F7A27F0581B89680 method: GET - uri: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('1')?api-version=2019-05-06-Preview + uri: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('1')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"1","hotelName":"Fancy Stay","description":"Best hotel in @@ -25,19 +25,19 @@ interactions: content-encoding: gzip content-length: '748' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:50:24 GMT - elapsed-time: '12' + date: Thu, 19 Mar 2020 00:19:20 GMT + elapsed-time: '145' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 9d171e80-67ae-11ea-9335-8c8590507855 + request-id: 46ffe552-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('1')?api-version=2019-05-06-Preview + url: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('1')?api-version=2019-05-06-Preview - request: body: null headers: @@ -46,9 +46,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6A664EE78DD8350E4834FC69AE123F60 + - 794D643BAA2C0B61F7A27F0581B89680 method: GET - uri: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('2')?api-version=2019-05-06-Preview + uri: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('2')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"2","hotelName":"Roach Motel","description":"Cheapest hotel @@ -59,19 +59,19 @@ interactions: content-encoding: gzip content-length: '449' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 
17:50:24 GMT - elapsed-time: '9' + date: Thu, 19 Mar 2020 00:19:20 GMT + elapsed-time: '5' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 9d28fc04-67ae-11ea-9335-8c8590507855 + request-id: 4726ef12-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('2')?api-version=2019-05-06-Preview + url: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('2')?api-version=2019-05-06-Preview - request: body: null headers: @@ -80,9 +80,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6A664EE78DD8350E4834FC69AE123F60 + - 794D643BAA2C0B61F7A27F0581B89680 method: GET - uri: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview + uri: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"3","hotelName":"EconoStay","description":"Very popular @@ -92,19 +92,19 @@ interactions: content-encoding: gzip content-length: '438' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:50:24 GMT + date: Thu, 19 Mar 2020 00:19:20 GMT elapsed-time: '4' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 9d2ef0f0-67ae-11ea-9335-8c8590507855 + request-id: 472c9462-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview + url: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview - request: body: null headers: @@ -113,9 
+113,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6A664EE78DD8350E4834FC69AE123F60 + - 794D643BAA2C0B61F7A27F0581B89680 method: GET - uri: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"4","hotelName":"Express Rooms","description":"Pretty good @@ -125,19 +125,19 @@ interactions: content-encoding: gzip content-length: '422' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:50:24 GMT + date: Thu, 19 Mar 2020 00:19:20 GMT elapsed-time: '3' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 9d33dbf6-67ae-11ea-9335-8c8590507855 + request-id: 4731ada8-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + url: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview - request: body: null headers: @@ -146,9 +146,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6A664EE78DD8350E4834FC69AE123F60 + - 794D643BAA2C0B61F7A27F0581B89680 method: GET - uri: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('5')?api-version=2019-05-06-Preview + uri: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('5')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"5","hotelName":"Comfy Place","description":"Another good @@ -158,19 +158,19 @@ interactions: content-encoding: gzip content-length: '424' content-type: application/json; odata.metadata=none - date: Mon, 
16 Mar 2020 17:50:24 GMT - elapsed-time: '3' + date: Thu, 19 Mar 2020 00:19:20 GMT + elapsed-time: '4' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 9d38c71a-67ae-11ea-9335-8c8590507855 + request-id: 4736b366-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('5')?api-version=2019-05-06-Preview + url: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('5')?api-version=2019-05-06-Preview - request: body: null headers: @@ -179,9 +179,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6A664EE78DD8350E4834FC69AE123F60 + - 794D643BAA2C0B61F7A27F0581B89680 method: GET - uri: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('6')?api-version=2019-05-06-Preview + uri: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('6')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"6","hotelName":null,"description":"Surprisingly expensive. 
@@ -191,19 +191,19 @@ interactions: content-encoding: gzip content-length: '301' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:50:24 GMT + date: Thu, 19 Mar 2020 00:19:20 GMT elapsed-time: '4' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 9d3dcd50-67ae-11ea-9335-8c8590507855 + request-id: 473beeda-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('6')?api-version=2019-05-06-Preview + url: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('6')?api-version=2019-05-06-Preview - request: body: null headers: @@ -212,9 +212,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6A664EE78DD8350E4834FC69AE123F60 + - 794D643BAA2C0B61F7A27F0581B89680 method: GET - uri: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('7')?api-version=2019-05-06-Preview + uri: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('7')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"7","hotelName":"Modern Stay","description":"Modern architecture, @@ -225,19 +225,19 @@ interactions: content-encoding: gzip content-length: '357' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:50:24 GMT - elapsed-time: '4' + date: Thu, 19 Mar 2020 00:19:20 GMT + elapsed-time: '3' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 9d4366de-67ae-11ea-9335-8c8590507855 + request-id: 4740fcb8-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: 
https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('7')?api-version=2019-05-06-Preview + url: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('7')?api-version=2019-05-06-Preview - request: body: null headers: @@ -246,9 +246,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6A664EE78DD8350E4834FC69AE123F60 + - 794D643BAA2C0B61F7A27F0581B89680 method: GET - uri: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('8')?api-version=2019-05-06-Preview + uri: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('8')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"8","hotelName":null,"description":"Has some road noise @@ -260,19 +260,19 @@ interactions: content-encoding: gzip content-length: '411' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:50:24 GMT - elapsed-time: '3' + date: Thu, 19 Mar 2020 00:19:20 GMT + elapsed-time: '4' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 9d49420c-67ae-11ea-9335-8c8590507855 + request-id: 47462634-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('8')?api-version=2019-05-06-Preview + url: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('8')?api-version=2019-05-06-Preview - request: body: null headers: @@ -281,9 +281,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6A664EE78DD8350E4834FC69AE123F60 + - 794D643BAA2C0B61F7A27F0581B89680 method: GET - uri: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('9')?api-version=2019-05-06-Preview + uri: 
https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('9')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"9","hotelName":"Secret Point Motel","description":"The @@ -309,19 +309,19 @@ interactions: content-encoding: gzip content-length: '1061' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:50:24 GMT - elapsed-time: '17' + date: Thu, 19 Mar 2020 00:19:20 GMT + elapsed-time: '8' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 9d4e234e-67ae-11ea-9335-8c8590507855 + request-id: 474b6a04-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('9')?api-version=2019-05-06-Preview + url: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('9')?api-version=2019-05-06-Preview - request: body: null headers: @@ -330,9 +330,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6A664EE78DD8350E4834FC69AE123F60 + - 794D643BAA2C0B61F7A27F0581B89680 method: GET - uri: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('10')?api-version=2019-05-06-Preview + uri: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('10')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"10","hotelName":"Countryside Hotel","description":"Save @@ -354,17 +354,17 @@ interactions: content-encoding: gzip content-length: '938' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:50:24 GMT - elapsed-time: '4' + date: Thu, 19 Mar 2020 00:19:20 GMT + elapsed-time: '3' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 9d568c0a-67ae-11ea-9335-8c8590507855 + request-id: 
47517868-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searche7410d98.search.windows.net/indexes('drgqefsg')/docs('10')?api-version=2019-05-06-Preview + url: https://search41be100f.search.windows.net/indexes('drgqefsg')/docs('10')?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_document_missing.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_document_missing.yaml similarity index 68% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_document_missing.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_document_missing.yaml index c5ca05aa08366..fa0c217db274a 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_document_missing.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_document_missing.yaml @@ -7,23 +7,23 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6AD5FBE7A7A03FEB36BFAD936F1D225D + - ED390E220F1E902275D59A5047360817 method: GET - uri: https://search630210f1.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + uri: https://searchd1281368.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview response: body: string: '' headers: cache-control: no-cache content-length: '0' - date: Mon, 16 Mar 2020 17:50:37 GMT - elapsed-time: '654' + date: Thu, 19 Mar 2020 00:19:30 GMT + elapsed-time: '62' expires: '-1' pragma: no-cache - request-id: a4891de4-67ae-11ea-9335-8c8590507855 + request-id: 4d880116-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; 
includeSubDomains status: code: 404 message: Not Found - url: https://search630210f1.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + url: https://searchd1281368.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_counts.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_counts.yaml similarity index 96% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_counts.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_counts.yaml index 60132ac70a6ae..2a4be953b022d 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_counts.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_counts.yaml @@ -11,9 +11,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - D81974837F6C2E7D607ED2B3267C1A3A + - 3778D42FDFB4652F21C284FF151A97EC method: POST - uri: https://search30f20faa.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search97b31221.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":0.48248714,"hotelId":"10","hotelName":"Countryside @@ -65,19 +65,19 @@ interactions: content-encoding: gzip content-length: '2377' content-type: application/json; odata.metadata=none - date: Wed, 18 Mar 2020 17:40:57 GMT - elapsed-time: '178' + date: Thu, 19 Mar 2020 00:19:41 GMT + elapsed-time: '139' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 
9fd37852-693f-11ea-8daf-8c8590507855 + request-id: 5390c94e-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search30f20faa.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + url: https://search97b31221.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview - request: body: '{"count": true, "search": "hotel"}' headers: @@ -90,9 +90,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - D81974837F6C2E7D607ED2B3267C1A3A + - 3778D42FDFB4652F21C284FF151A97EC method: POST - uri: https://search30f20faa.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search97b31221.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"@odata.count":7,"value":[{"@search.score":0.48248714,"hotelId":"10","hotelName":"Countryside @@ -144,17 +144,17 @@ interactions: content-encoding: gzip content-length: '2388' content-type: application/json; odata.metadata=none - date: Wed, 18 Mar 2020 17:40:57 GMT + date: Thu, 19 Mar 2020 00:19:41 GMT elapsed-time: '6' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: a00b487c-693f-11ea-8daf-8c8590507855 + request-id: 53b85e6e-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search30f20faa.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + url: https://search97b31221.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview version: 1 diff --git 
a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_coverage.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_coverage.yaml similarity index 96% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_coverage.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_coverage.yaml index 0aae199715283..42bcdbd7ed594 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_coverage.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_coverage.yaml @@ -11,9 +11,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - FA4BA6D1C4A038BB6FE23139D81DB540 + - 97B456BC0C412F6AD3658EB5DCFCA411 method: POST - uri: https://search5114105a.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://searchbcc312d1.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":0.48248714,"hotelId":"10","hotelName":"Countryside @@ -65,19 +65,19 @@ interactions: content-encoding: gzip content-length: '2377' content-type: application/json; odata.metadata=none - date: Wed, 18 Mar 2020 17:44:29 GMT - elapsed-time: '177' + date: Thu, 19 Mar 2020 00:19:52 GMT + elapsed-time: '21' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 1dd6c3e4-6940-11ea-8134-8c8590507855 + request-id: 5a3c2edc-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: 
https://search5114105a.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + url: https://searchbcc312d1.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview - request: body: '{"minimumCoverage": 50.0, "search": "hotel"}' headers: @@ -90,9 +90,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - FA4BA6D1C4A038BB6FE23139D81DB540 + - 97B456BC0C412F6AD3658EB5DCFCA411 method: POST - uri: https://search5114105a.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://searchbcc312d1.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"@search.coverage":100.0,"value":[{"@search.score":0.48248714,"hotelId":"10","hotelName":"Countryside @@ -144,17 +144,17 @@ interactions: content-encoding: gzip content-length: '2389' content-type: application/json; odata.metadata=none - date: Wed, 18 Mar 2020 17:44:29 GMT - elapsed-time: '10' + date: Thu, 19 Mar 2020 00:19:52 GMT + elapsed-time: '6' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 1e089112-6940-11ea-8134-8c8590507855 + request-id: 5a50b104-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search5114105a.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + url: https://searchbcc312d1.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_facets_none.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_facets_none.yaml similarity index 
87% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_facets_none.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_facets_none.yaml index 75ec8c3829a31..0c2ffceb8c9da 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_facets_none.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_facets_none.yaml @@ -11,9 +11,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 49E73CA9570DF3F22BCCE297A1E3F246 + - 8B5AE4B2C766FF5DD809B8E3F109A5BB method: POST - uri: https://search84101193.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://searchf724140a.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":0.2423066,"hotelName":"Countryside Hotel","description":"Save @@ -32,17 +32,17 @@ interactions: content-encoding: gzip content-length: '609' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:50:49 GMT - elapsed-time: '239' + date: Thu, 19 Mar 2020 00:20:03 GMT + elapsed-time: '77' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: abbcc318-67ae-11ea-9335-8c8590507855 + request-id: 60bf7282-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search84101193.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + url: https://searchf724140a.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview version: 1 diff --git 
a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_facets_result.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_facets_result.yaml similarity index 87% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_facets_result.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_facets_result.yaml index 94e0512392229..582f3e0851b29 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_facets_result.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_facets_result.yaml @@ -11,9 +11,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 21DADBBFAF9DAA61F93B16277D0648C2 + - 3F21CD52B96EAA8D000A311E70071FB5 method: POST - uri: https://searcha8ac1282.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search20bd14f9.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"@search.facets":{"category":[{"count":4,"value":"Budget"},{"count":1,"value":"Luxury"}]},"value":[{"@search.score":0.2423066,"hotelName":"Countryside @@ -32,17 +32,17 @@ interactions: content-encoding: gzip content-length: '646' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:51:00 GMT - elapsed-time: '300' + date: Thu, 19 Mar 2020 00:20:14 GMT + elapsed-time: '111' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: b2219e5e-67ae-11ea-9335-8c8590507855 + request-id: 670a83fc-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 
200 message: OK - url: https://searcha8ac1282.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + url: https://search20bd14f9.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_filter.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_filter.yaml similarity index 85% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_filter.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_filter.yaml index 81fc8b8ea9aa7..95e3aade9d099 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_filter.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_filter.yaml @@ -12,9 +12,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 448BCA630718F70AE46F9CD040619CD7 + - EF6C82740C2809864DC66C2A0167A600 method: POST - uri: https://search30b50f94.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search9776120b.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":0.19169211,"hotelName":"Express Rooms","description":"Pretty @@ -29,17 +29,17 @@ interactions: content-encoding: gzip content-length: '441' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:51:11 GMT - elapsed-time: '127' + date: Thu, 19 Mar 2020 00:20:24 GMT + elapsed-time: '110' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 
b927273c-67ae-11ea-9335-8c8590507855 + request-id: 6e06b676-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search30b50f94.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + url: https://search9776120b.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_simple.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_simple.yaml similarity index 94% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_simple.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_simple.yaml index 4786af70a5c1d..d03454a9d839f 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_get_search_simple.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_get_search_simple.yaml @@ -11,9 +11,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - DE659731E47A04B401EB83D8FECD95EE + - EC787537A28DE28386557BA4856835BA method: POST - uri: https://search30fc0f98.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search97bd120f.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":0.48248714,"hotelId":"10","hotelName":"Countryside @@ -65,19 +65,19 @@ interactions: content-encoding: gzip content-length: '2377' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:51:25 GMT - elapsed-time: '52' + 
date: Thu, 19 Mar 2020 00:20:37 GMT + elapsed-time: '85' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: c1961be4-67ae-11ea-9335-8c8590507855 + request-id: 74f29356-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search30fc0f98.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + url: https://search97bd120f.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview - request: body: '{"search": "motel"}' headers: @@ -90,9 +90,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - DE659731E47A04B401EB83D8FECD95EE + - EC787537A28DE28386557BA4856835BA method: POST - uri: https://search30fc0f98.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search97bd120f.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":1.2368374,"hotelId":"2","hotelName":"Roach @@ -120,17 +120,17 @@ interactions: content-encoding: gzip content-length: '1271' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:51:25 GMT - elapsed-time: '9' + date: Thu, 19 Mar 2020 00:20:37 GMT + elapsed-time: '7' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: c1af631a-67ae-11ea-9335-8c8590507855 + request-id: 750fe9ce-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search30fc0f98.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + url: 
https://search97bd120f.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_merge_documents_existing.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_merge_documents_existing.yaml similarity index 79% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_merge_documents_existing.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_merge_documents_existing.yaml index 2eb5d1dc02eeb..5320d95343c11 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_merge_documents_existing.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_merge_documents_existing.yaml @@ -12,9 +12,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 96D576CAA3CEA9F98958474FAF831934 + - 4AD112EF99BD46B8CCB7B69FBE793E8A method: POST - uri: https://searchaaf712a5.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://search2308151c.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"3","status":true,"errorMessage":null,"statusCode":200},{"key":"4","status":true,"errorMessage":null,"statusCode":200}]}' @@ -23,19 +23,19 @@ interactions: content-encoding: gzip content-length: '190' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:51:40 GMT - elapsed-time: '195' + date: Thu, 19 Mar 2020 00:20:48 GMT + elapsed-time: '101' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: ca8f8dac-67ae-11ea-9335-8c8590507855 + request-id: 
7b7a399a-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchaaf712a5.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + url: https://search2308151c.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview - request: body: null headers: @@ -44,9 +44,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 96D576CAA3CEA9F98958474FAF831934 + - 4AD112EF99BD46B8CCB7B69FBE793E8A method: GET - uri: https://searchaaf712a5.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://search2308151c.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF10" @@ -55,19 +55,19 @@ interactions: content-encoding: gzip content-length: '127' content-type: text/plain - date: Mon, 16 Mar 2020 17:51:44 GMT + date: Thu, 19 Mar 2020 00:20:50 GMT elapsed-time: '4' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: cc87edde-67ae-11ea-9335-8c8590507855 + request-id: 7d66fdb0-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchaaf712a5.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + url: https://search2308151c.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview - request: body: null headers: @@ -76,9 +76,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 96D576CAA3CEA9F98958474FAF831934 + - 4AD112EF99BD46B8CCB7B69FBE793E8A method: GET - uri: https://searchaaf712a5.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview + uri: 
https://search2308151c.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"3","hotelName":"EconoStay","description":"Very popular @@ -88,19 +88,19 @@ interactions: content-encoding: gzip content-length: '438' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:51:44 GMT - elapsed-time: '12' + date: Thu, 19 Mar 2020 00:20:50 GMT + elapsed-time: '8' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: cc8d38a2-67ae-11ea-9335-8c8590507855 + request-id: 7d6c9fc2-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchaaf712a5.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview + url: https://search2308151c.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview - request: body: null headers: @@ -109,9 +109,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 96D576CAA3CEA9F98958474FAF831934 + - 4AD112EF99BD46B8CCB7B69FBE793E8A method: GET - uri: https://searchaaf712a5.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://search2308151c.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"4","hotelName":"Express Rooms","description":"Pretty good @@ -121,17 +121,17 @@ interactions: content-encoding: gzip content-length: '422' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:51:44 GMT - elapsed-time: '5' + date: Thu, 19 Mar 2020 00:20:50 GMT + elapsed-time: '4' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: cc941bae-67ae-11ea-9335-8c8590507855 + request-id: 7d72da2c-6977-11ea-bb71-8c8590507855 
strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchaaf712a5.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + url: https://search2308151c.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_merge_documents_missing.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_merge_documents_missing.yaml similarity index 68% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_merge_documents_missing.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_merge_documents_missing.yaml index d9dec14b38c1e..8ca9369bc4fd8 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_merge_documents_missing.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_merge_documents_missing.yaml @@ -12,9 +12,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 93A5557231073F985B70E3930E3B0008 + - E9C841AA4C505EA80436C967A6F701BE method: POST - uri: https://search98441234.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://searchdde14ab.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"1000","status":false,"errorMessage":"Document not @@ -24,19 +24,19 @@ interactions: content-encoding: gzip content-length: '225' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:51:55 GMT - elapsed-time: '191' + date: Thu, 19 Mar 2020 00:21:02 GMT + elapsed-time: '86' expires: '-1' odata-version: '4.0' pragma: 
no-cache preference-applied: odata.include-annotations="*" - request-id: d34a0b7a-67ae-11ea-9335-8c8590507855 + request-id: 83e9990e-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 207 message: Multi-Status - url: https://search98441234.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + url: https://searchdde14ab.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview - request: body: null headers: @@ -45,9 +45,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 93A5557231073F985B70E3930E3B0008 + - E9C841AA4C505EA80436C967A6F701BE method: GET - uri: https://search98441234.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://searchdde14ab.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF10" @@ -56,19 +56,19 @@ interactions: content-encoding: gzip content-length: '127' content-type: text/plain - date: Mon, 16 Mar 2020 17:51:58 GMT - elapsed-time: '6' + date: Thu, 19 Mar 2020 00:21:05 GMT + elapsed-time: '3' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: d542112a-67ae-11ea-9335-8c8590507855 + request-id: 85d1da92-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search98441234.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + url: https://searchdde14ab.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview - request: body: null headers: @@ -77,25 +77,25 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 93A5557231073F985B70E3930E3B0008 + - 
E9C841AA4C505EA80436C967A6F701BE method: GET - uri: https://search98441234.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + uri: https://searchdde14ab.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview response: body: string: '' headers: cache-control: no-cache content-length: '0' - date: Mon, 16 Mar 2020 17:51:58 GMT - elapsed-time: '9' + date: Thu, 19 Mar 2020 00:21:05 GMT + elapsed-time: '4' expires: '-1' pragma: no-cache - request-id: d5478664-67ae-11ea-9335-8c8590507855 + request-id: 85d7a3c8-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains status: code: 404 message: Not Found - url: https://search98441234.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + url: https://searchdde14ab.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview - request: body: null headers: @@ -104,9 +104,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 93A5557231073F985B70E3930E3B0008 + - E9C841AA4C505EA80436C967A6F701BE method: GET - uri: https://search98441234.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://searchdde14ab.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"4","hotelName":"Express Rooms","description":"Pretty good @@ -116,17 +116,17 @@ interactions: content-encoding: gzip content-length: '422' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:51:58 GMT - elapsed-time: '11' + date: Thu, 19 Mar 2020 00:21:05 GMT + elapsed-time: '9' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: d54d45c2-67ae-11ea-9335-8c8590507855 + request-id: 85dc6e9e-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; 
includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search98441234.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + url: https://searchdde14ab.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_merge_or_upload_documents.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_merge_or_upload_documents.yaml similarity index 79% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_merge_or_upload_documents.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_merge_or_upload_documents.yaml index 3f51f778ba3a1..cbec251592743 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_merge_or_upload_documents.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_merge_or_upload_documents.yaml @@ -12,9 +12,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - E802C4423FCD2F970AD87F6AE0EFB895 + - 69FBF7A0FEBE9D7FEBC42032455EEB51 method: POST - uri: https://searchbd5d12ff.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://search37e51576.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"1000","status":true,"errorMessage":null,"statusCode":201},{"key":"4","status":true,"errorMessage":null,"statusCode":200}]}' @@ -23,19 +23,19 @@ interactions: content-encoding: gzip content-length: '196' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:52:11 GMT - elapsed-time: '181' + date: Thu, 19 Mar 2020 00:21:16 GMT + elapsed-time: '114' expires: 
'-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: dc63c7d2-67ae-11ea-9335-8c8590507855 + request-id: 8c1389c8-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchbd5d12ff.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + url: https://search37e51576.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview - request: body: null headers: @@ -44,9 +44,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - E802C4423FCD2F970AD87F6AE0EFB895 + - 69FBF7A0FEBE9D7FEBC42032455EEB51 method: GET - uri: https://searchbd5d12ff.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://search37e51576.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF11" @@ -55,19 +55,19 @@ interactions: content-encoding: gzip content-length: '127' content-type: text/plain - date: Mon, 16 Mar 2020 17:52:13 GMT + date: Thu, 19 Mar 2020 00:21:19 GMT elapsed-time: '3' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: de594ff8-67ae-11ea-9335-8c8590507855 + request-id: 8e0348b8-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchbd5d12ff.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + url: https://search37e51576.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview - request: body: null headers: @@ -76,9 +76,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - E802C4423FCD2F970AD87F6AE0EFB895 + - 
69FBF7A0FEBE9D7FEBC42032455EEB51 method: GET - uri: https://searchbd5d12ff.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + uri: https://search37e51576.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"1000","hotelName":null,"description":null,"descriptionFr":null,"category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":1,"location":null,"address":null,"rooms":[]}' @@ -87,19 +87,19 @@ interactions: content-encoding: gzip content-length: '257' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:52:13 GMT - elapsed-time: '19' + date: Thu, 19 Mar 2020 00:21:19 GMT + elapsed-time: '8' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: de5f164a-67ae-11ea-9335-8c8590507855 + request-id: 8e0881f2-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchbd5d12ff.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + url: https://search37e51576.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview - request: body: null headers: @@ -108,9 +108,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - E802C4423FCD2F970AD87F6AE0EFB895 + - 69FBF7A0FEBE9D7FEBC42032455EEB51 method: GET - uri: https://searchbd5d12ff.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://search37e51576.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"4","hotelName":"Express Rooms","description":"Pretty good @@ -120,17 +120,17 @@ interactions: content-encoding: gzip content-length: '422' content-type: application/json; 
odata.metadata=none - date: Mon, 16 Mar 2020 17:52:13 GMT + date: Thu, 19 Mar 2020 00:21:19 GMT elapsed-time: '6' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: de665c8e-67ae-11ea-9335-8c8590507855 + request-id: 8e110390-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchbd5d12ff.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + url: https://search37e51576.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_suggest.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_suggest.yaml similarity index 79% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_suggest.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_suggest.yaml index 7b2358b73e270..1ec9edccfee6b 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_suggest.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_suggest.yaml @@ -11,9 +11,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BA0339AEE7CD7E6E8995C893F9DA0CC4 + - 227C94211C71BF9A79BD490B4A9BA1D5 method: POST - uri: https://searcha8490b9c.search.windows.net/indexes('drgqefsg')/docs/search.post.suggest?api-version=2019-05-06-Preview + uri: https://searchf6640e13.search.windows.net/indexes('drgqefsg')/docs/search.post.suggest?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.text":"Cheapest hotel in town. 
Infact, a motel.","hotelId":"2"},{"@search.text":"Secret @@ -23,17 +23,17 @@ interactions: content-encoding: gzip content-length: '216' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:52:24 GMT - elapsed-time: '231' + date: Thu, 19 Mar 2020 00:21:30 GMT + elapsed-time: '82' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: e4fca4ae-67ae-11ea-9335-8c8590507855 + request-id: 9473f31e-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searcha8490b9c.search.windows.net/indexes('drgqefsg')/docs/search.post.suggest?api-version=2019-05-06-Preview + url: https://searchf6640e13.search.windows.net/indexes('drgqefsg')/docs/search.post.suggest?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_upload_documents_existing.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_upload_documents_existing.yaml similarity index 81% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_upload_documents_existing.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_upload_documents_existing.yaml index d28986720ade7..d3d164e390b93 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_upload_documents_existing.yaml +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_upload_documents_existing.yaml @@ -13,9 +13,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 4906251E7C9CCA8F0E068AD07C86A4DB + - 117268E274650997E3026A3675CA6FD0 method: POST - uri: 
https://searchbf13131a.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://search399b1591.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"1000","status":true,"errorMessage":null,"statusCode":201},{"key":"3","status":true,"errorMessage":null,"statusCode":200}]}' @@ -24,17 +24,17 @@ interactions: content-encoding: gzip content-length: '196' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:52:42 GMT - elapsed-time: '216' + date: Thu, 19 Mar 2020 00:21:42 GMT + elapsed-time: '84' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: ef0f421c-67ae-11ea-9335-8c8590507855 + request-id: 9b8d27b0-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://searchbf13131a.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + url: https://search399b1591.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_upload_documents_new.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_upload_documents_new.yaml similarity index 78% rename from sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_upload_documents_new.yaml rename to sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_upload_documents_new.yaml index 880cf76af4bd0..532c09c3ec827 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_live_async.test_upload_documents_new.yaml +++ 
b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_index_live_async.test_upload_documents_new.yaml @@ -13,9 +13,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 7ABA45308898B1EB19050748809736AD + - 52BF667B891E6C9E950DAC8FBCEA1522 method: POST - uri: https://search63c010f9.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://searchd1e61370.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"1000","status":true,"errorMessage":null,"statusCode":201},{"key":"1001","status":true,"errorMessage":null,"statusCode":201}]}' @@ -24,19 +24,19 @@ interactions: content-encoding: gzip content-length: '193' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:52:54 GMT - elapsed-time: '155' + date: Thu, 19 Mar 2020 00:21:52 GMT + elapsed-time: '29' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: f623cbd6-67ae-11ea-9335-8c8590507855 + request-id: a1e19006-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search63c010f9.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + url: https://searchd1e61370.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview - request: body: null headers: @@ -45,9 +45,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 7ABA45308898B1EB19050748809736AD + - 52BF667B891E6C9E950DAC8FBCEA1522 method: GET - uri: https://search63c010f9.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: 
https://searchd1e61370.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF12" @@ -56,19 +56,19 @@ interactions: content-encoding: gzip content-length: '127' content-type: text/plain - date: Mon, 16 Mar 2020 17:52:57 GMT - elapsed-time: '4' + date: Thu, 19 Mar 2020 00:21:55 GMT + elapsed-time: '44' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: f8168cd0-67ae-11ea-9335-8c8590507855 + request-id: a3c1578a-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search63c010f9.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + url: https://searchd1e61370.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview - request: body: null headers: @@ -77,9 +77,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 7ABA45308898B1EB19050748809736AD + - 52BF667B891E6C9E950DAC8FBCEA1522 method: GET - uri: https://search63c010f9.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + uri: https://searchd1e61370.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"1000","hotelName":"Azure Inn","description":null,"descriptionFr":null,"category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":5,"location":null,"address":null,"rooms":[]}' @@ -88,19 +88,19 @@ interactions: content-encoding: gzip content-length: '267' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:52:57 GMT - elapsed-time: '7' + date: Thu, 19 Mar 2020 00:21:55 GMT + elapsed-time: '6' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: 
f81bf3aa-67ae-11ea-9335-8c8590507855 + request-id: a3ce5cbe-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search63c010f9.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + url: https://searchd1e61370.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview - request: body: null headers: @@ -109,9 +109,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 7ABA45308898B1EB19050748809736AD + - 52BF667B891E6C9E950DAC8FBCEA1522 method: GET - uri: https://search63c010f9.search.windows.net/indexes('drgqefsg')/docs('1001')?api-version=2019-05-06-Preview + uri: https://searchd1e61370.search.windows.net/indexes('drgqefsg')/docs('1001')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"1001","hotelName":"Redmond Hotel","description":null,"descriptionFr":null,"category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":4,"location":null,"address":null,"rooms":[]}' @@ -120,17 +120,17 @@ interactions: content-encoding: gzip content-length: '268' content-type: application/json; odata.metadata=none - date: Mon, 16 Mar 2020 17:52:57 GMT + date: Thu, 19 Mar 2020 00:21:55 GMT elapsed-time: '4' expires: '-1' odata-version: '4.0' pragma: no-cache preference-applied: odata.include-annotations="*" - request-id: f82229fa-67ae-11ea-9335-8c8590507855 + request-id: a3d5fbea-6977-11ea-bb71-8c8590507855 strict-transport-security: max-age=15724800; includeSubDomains vary: Accept-Encoding status: code: 200 message: OK - url: https://search63c010f9.search.windows.net/indexes('drgqefsg')/docs('1001')?api-version=2019-05-06-Preview + url: https://searchd1e61370.search.windows.net/indexes('drgqefsg')/docs('1001')?api-version=2019-05-06-Preview version: 1 diff --git 
a/sdk/search/azure-search-documents/tests/async_tests/recordings/test_service_live_async.test_get_service_statistics.yaml b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_service_live_async.test_get_service_statistics.yaml new file mode 100644 index 0000000000000..d53a81cb9827b --- /dev/null +++ b/sdk/search/azure-search-documents/tests/async_tests/recordings/test_service_live_async.test_get_service_statistics.yaml @@ -0,0 +1,34 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json;odata.metadata=minimal + User-Agent: + - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) + api-key: + - FBE49E68975C6A1307FBDED68DAAE9CA + method: GET + uri: https://search24f71524.search.windows.net/servicestats?api-version=2019-05-06-Preview + response: + body: + string: '{"@odata.context":"https://search24f71524.search.windows.net/$metadata#Microsoft.Azure.Search.V2019_05_06_Preview.ServiceStatistics","counters":{"documentCount":{"usage":0,"quota":null},"indexesCount":{"usage":0,"quota":3},"indexersCount":{"usage":0,"quota":3},"dataSourcesCount":{"usage":0,"quota":3},"storageSize":{"usage":0,"quota":52428800},"synonymMaps":{"usage":0,"quota":3},"skillsetCount":{"usage":0,"quota":3}},"limits":{"maxFieldsPerIndex":1000,"maxFieldNestingDepthPerIndex":10,"maxComplexCollectionFieldsPerIndex":40,"maxComplexObjectsInCollectionsPerDocument":3000}}' + headers: + cache-control: no-cache + content-encoding: gzip + content-length: '430' + content-type: application/json; odata.metadata=minimal + date: Thu, 19 Mar 2020 19:51:01 GMT + elapsed-time: '49' + expires: '-1' + odata-version: '4.0' + pragma: no-cache + preference-applied: odata.include-annotations="*" + request-id: f4ecc81c-6a1a-11ea-8e1b-8c8590507855 + strict-transport-security: max-age=15724800; includeSubDomains + vary: Accept-Encoding + status: + code: 200 + message: OK + url: 
https://search24f71524.search.windows.net/servicestats?api-version=2019-05-06-Preview +version: 1 diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_index_live_async.py similarity index 78% rename from sdk/search/azure-search-documents/tests/async_tests/test_live_async.py rename to sdk/search/azure-search-documents/tests/async_tests/test_index_live_async.py index 23b9cd40eab05..597d3d24a4565 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_index_live_async.py @@ -23,7 +23,12 @@ BATCH = json.load(open(join(CWD, "..", "hotel_small.json"))) from azure.core.exceptions import HttpResponseError -from azure.search.documents import AutocompleteQuery, SearchApiKeyCredential, SearchQuery, SuggestQuery +from azure.search.documents import ( + AutocompleteQuery, + SearchApiKeyCredential, + SearchQuery, + SuggestQuery, +) from azure.search.documents.aio import SearchIndexClient @@ -40,12 +45,14 @@ def run(test_class_instance, *args, **kwargs): return run -class SearchIndexClientTestAsync(AzureMgmtTestCase): +class SearchIndexClientTestAsync(AzureMgmtTestCase): @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @await_prepared_test - async def test_async_get_document_count(self, api_key, endpoint, index_name, **kwargs): + async def test_async_get_document_count( + self, api_key, endpoint, index_name, **kwargs + ): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) @@ -62,7 +69,7 @@ async def test_get_document(self, api_key, endpoint, index_name, **kwargs): async with client: for hotel_id in range(1, 11): result = await client.get_document(key=str(hotel_id)) - expected = BATCH['value'][hotel_id-1] + expected = BATCH["value"][hotel_id - 1] assert result.get("hotelId") == expected.get("hotelId") assert 
result.get("hotelName") == expected.get("hotelName") assert result.get("description") == expected.get("description") @@ -113,10 +120,18 @@ async def test_get_search_filter(self, api_key, endpoint, index_name, **kwargs): results = [] async for x in await client.search(query=query): results.append(x) - assert [x['hotelName'] for x in results] == sorted([x['hotelName'] for x in results], reverse=True) - expected = {"category", "hotelName", "description", "@search.score", "@search.highlights"} + assert [x["hotelName"] for x in results] == sorted( + [x["hotelName"] for x in results], reverse=True + ) + expected = { + "category", + "hotelName", + "description", + "@search.score", + "@search.highlights", + } assert all(set(x) == expected for x in results) - assert all(x['category'] == "Budget" for x in results) + assert all(x["category"] == "Budget" for x in results) @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @@ -134,7 +149,6 @@ async def test_get_search_counts(self, api_key, endpoint, index_name, **kwargs): results = await client.search(query=query) assert await results.get_count() == 7 - @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @await_prepared_test @@ -156,7 +170,9 @@ async def test_get_search_coverage(self, api_key, endpoint, index_name, **kwargs @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @await_prepared_test - async def test_get_search_facets_none(self, api_key, endpoint, index_name, **kwargs): + async def test_get_search_facets_none( + self, api_key, endpoint, index_name, **kwargs + ): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) @@ -171,7 +187,9 @@ async def test_get_search_facets_none(self, api_key, endpoint, index_name, **kwa @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) 
@await_prepared_test - async def test_get_search_facets_result(self, api_key, endpoint, index_name, **kwargs): + async def test_get_search_facets_result( + self, api_key, endpoint, index_name, **kwargs + ): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) @@ -181,7 +199,12 @@ async def test_get_search_facets_result(self, api_key, endpoint, index_name, **k async with client: results = await client.search(query=query) - assert await results.get_facets() == {'category': [{'value': 'Budget', 'count': 4}, {'value': 'Luxury', 'count': 1}]} + assert await results.get_facets() == { + "category": [ + {"value": "Budget", "count": 4}, + {"value": "Luxury", "count": 1}, + ] + } @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @@ -193,7 +216,7 @@ async def test_autocomplete(self, api_key, endpoint, index_name, **kwargs): async with client: query = AutocompleteQuery(search_text="mot", suggester_name="sg") results = await client.autocomplete(query=query) - assert results == [{'text': 'motel', 'query_plus_text': 'motel'}] + assert results == [{"text": "motel", "query_plus_text": "motel"}] @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @@ -206,8 +229,8 @@ async def test_suggest(self, api_key, endpoint, index_name, **kwargs): query = SuggestQuery(search_text="mot", suggester_name="sg") results = await client.suggest(query=query) assert results == [ - {'hotelId': '2', 'text': 'Cheapest hotel in town. Infact, a motel.'}, - {'hotelId': '9', 'text': 'Secret Point Motel'}, + {"hotelId": "2", "text": "Cheapest hotel in town. 
Infact, a motel."}, + {"hotelId": "9", "text": "Secret Point Motel"}, ] @ResourceGroupPreparer(random_name_enabled=True) @@ -217,18 +240,10 @@ async def test_upload_documents_new(self, api_key, endpoint, index_name, **kwarg client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) - DOCUMENTS = [{ - 'hotelId': '1000', - 'rating': 5, - 'rooms': [], - 'hotelName': 'Azure Inn', - }, - { - 'hotelId': '1001', - 'rating': 4, - 'rooms': [], - 'hotelName': 'Redmond Hotel', - }] + DOCUMENTS = [ + {"hotelId": "1000", "rating": 5, "rooms": [], "hotelName": "Azure Inn"}, + {"hotelId": "1001", "rating": 4, "rooms": [], "hotelName": "Redmond Hotel"}, + ] async with client: results = await client.upload_documents(DOCUMENTS) @@ -240,31 +255,25 @@ async def test_upload_documents_new(self, api_key, endpoint, index_name, **kwarg assert await client.get_document_count() == 12 for doc in DOCUMENTS: - result = await client.get_document(key=doc['hotelId']) - assert result['hotelId'] == doc['hotelId'] - assert result['hotelName'] == doc['hotelName'] - assert result['rating'] == doc['rating'] - assert result['rooms'] == doc['rooms'] + result = await client.get_document(key=doc["hotelId"]) + assert result["hotelId"] == doc["hotelId"] + assert result["hotelName"] == doc["hotelName"] + assert result["rating"] == doc["rating"] + assert result["rooms"] == doc["rooms"] @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @await_prepared_test - async def test_upload_documents_existing(self, api_key, endpoint, index_name, **kwargs): + async def test_upload_documents_existing( + self, api_key, endpoint, index_name, **kwargs + ): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) - DOCUMENTS = [{ - 'hotelId': '1000', - 'rating': 5, - 'rooms': [], - 'hotelName': 'Azure Inn', - }, - { - 'hotelId': '3', - 'rating': 4, - 'rooms': [], - 'hotelName': 'Redmond Hotel', - }] + DOCUMENTS = [ + 
{"hotelId": "1000", "rating": 5, "rooms": [], "hotelName": "Azure Inn"}, + {"hotelId": "3", "rating": 4, "rooms": [], "hotelName": "Redmond Hotel"}, + ] async with client: results = await client.upload_documents(DOCUMENTS) assert len(results) == 2 @@ -273,12 +282,16 @@ async def test_upload_documents_existing(self, api_key, endpoint, index_name, ** @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @await_prepared_test - async def test_delete_documents_existing(self, api_key, endpoint, index_name, **kwargs): + async def test_delete_documents_existing( + self, api_key, endpoint, index_name, **kwargs + ): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) async with client: - results = await client.delete_documents([{"hotelId": "3"}, {"hotelId": "4"}]) + results = await client.delete_documents( + [{"hotelId": "3"}, {"hotelId": "4"}] + ) assert len(results) == 2 assert set(x.status_code for x in results) == {200} @@ -296,12 +309,16 @@ async def test_delete_documents_existing(self, api_key, endpoint, index_name, ** @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @await_prepared_test - async def test_delete_documents_missing(self, api_key, endpoint, index_name, **kwargs): + async def test_delete_documents_missing( + self, api_key, endpoint, index_name, **kwargs + ): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) async with client: - results = await client.delete_documents([{"hotelId": "1000"}, {"hotelId": "4"}]) + results = await client.delete_documents( + [{"hotelId": "1000"}, {"hotelId": "4"}] + ) assert len(results) == 2 assert set(x.status_code for x in results) == {200} @@ -319,12 +336,16 @@ async def test_delete_documents_missing(self, api_key, endpoint, index_name, **k @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) 
@await_prepared_test - async def test_merge_documents_existing(self, api_key, endpoint, index_name, **kwargs): + async def test_merge_documents_existing( + self, api_key, endpoint, index_name, **kwargs + ): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) async with client: - results = await client.merge_documents([{"hotelId": "3", "rating": 1}, {"hotelId": "4", "rating": 2}]) + results = await client.merge_documents( + [{"hotelId": "3", "rating": 1}, {"hotelId": "4", "rating": 2}] + ) assert len(results) == 2 assert set(x.status_code for x in results) == {200} @@ -342,12 +363,16 @@ async def test_merge_documents_existing(self, api_key, endpoint, index_name, **k @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @await_prepared_test - async def test_merge_documents_missing(self, api_key, endpoint, index_name, **kwargs): + async def test_merge_documents_missing( + self, api_key, endpoint, index_name, **kwargs + ): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) async with client: - results = await client.merge_documents([{"hotelId": "1000", "rating": 1}, {"hotelId": "4", "rating": 2}]) + results = await client.merge_documents( + [{"hotelId": "1000", "rating": 1}, {"hotelId": "4", "rating": 2}] + ) assert len(results) == 2 assert set(x.status_code for x in results) == {200, 404} @@ -365,12 +390,16 @@ async def test_merge_documents_missing(self, api_key, endpoint, index_name, **kw @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @await_prepared_test - async def test_merge_or_upload_documents(self, api_key, endpoint, index_name, **kwargs): + async def test_merge_or_upload_documents( + self, api_key, endpoint, index_name, **kwargs + ): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) async with client: - results = await 
client.merge_or_upload_documents([{"hotelId": "1000", "rating": 1}, {"hotelId": "4", "rating": 2}]) + results = await client.merge_or_upload_documents( + [{"hotelId": "1000", "rating": 1}, {"hotelId": "4", "rating": 2}] + ) assert len(results) == 2 assert set(x.status_code for x in results) == {200, 201} diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py new file mode 100644 index 0000000000000..828887328fcbe --- /dev/null +++ b/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py @@ -0,0 +1,47 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import asyncio +import functools +import json +from os.path import dirname, join, realpath +import time + +import pytest + +from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer + +from search_service_preparer import SearchServicePreparer + +from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function + +from azure.core.exceptions import HttpResponseError +from azure.search.documents import SearchApiKeyCredential +from azure.search.documents.aio import SearchServiceClient + + +def await_prepared_test(test_fn): + """Synchronous wrapper for async test methods. 
Used to avoid making changes + upstream to AbstractPreparer (which doesn't await the functions it wraps) + """ + + @functools.wraps(test_fn) + def run(test_class_instance, *args, **kwargs): + trim_kwargs_from_test_function(test_fn, kwargs) + loop = asyncio.get_event_loop() + return loop.run_until_complete(test_fn(test_class_instance, **kwargs)) + + return run + + +class SearchIndexClientTest(AzureMgmtTestCase): + @ResourceGroupPreparer(random_name_enabled=True) + @SearchServicePreparer() + @await_prepared_test + async def test_get_service_statistics(self, api_key, endpoint, **kwargs): + client = SearchServiceClient(endpoint, SearchApiKeyCredential(api_key)) + result = await client.get_service_statistics() + assert isinstance(result, dict) + assert set(result.keys()) == {"counters", "limits"} diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_autocomplete.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_autocomplete.yaml similarity index 84% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_autocomplete.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_autocomplete.yaml index 7cedc0f483d60..05577a274b9d1 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_autocomplete.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_autocomplete.yaml @@ -15,9 +15,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 485C0BC909AE16E02F86B3420E914A1A + - 530914336379F37DE9758A354DE4A78B method: POST - uri: https://search9c0e0b2f.search.windows.net/indexes('drgqefsg')/docs/search.post.autocomplete?api-version=2019-05-06-Preview + uri: https://searche7b20da6.search.windows.net/indexes('drgqefsg')/docs/search.post.autocomplete?api-version=2019-05-06-Preview response: body: string: '{"value":[{"text":"motel","queryPlusText":"motel"}]}' @@ 
-29,9 +29,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:02 GMT + - Thu, 19 Mar 2020 00:14:05 GMT elapsed-time: - - '553' + - '162' expires: - '-1' odata-version: @@ -41,7 +41,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 007c6080-67ae-11ea-9335-8c8590507855 + - 8b0bcdb6-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_delete_documents_existing.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_delete_documents_existing.yaml similarity index 81% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_delete_documents_existing.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_delete_documents_existing.yaml index b3bd155100451..c500e772b0a4f 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_delete_documents_existing.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_delete_documents_existing.yaml @@ -16,9 +16,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - AC3A707262B0134A190F5DAD3E0C8194 + - 2F50A464F96DEE54FA95B5848C35E3BD method: POST - uri: https://search5091108b.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://searchbc401302.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"3","status":true,"errorMessage":null,"statusCode":200},{"key":"4","status":true,"errorMessage":null,"statusCode":200}]}' @@ -30,9 +30,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:14 GMT + - Thu, 19 Mar 2020 00:14:14 GMT elapsed-time: - - '202' + - '83' 
expires: - '-1' odata-version: @@ -42,7 +42,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 078c9afc-67ae-11ea-9335-8c8590507855 + - 915aa9bc-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -62,9 +62,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - AC3A707262B0134A190F5DAD3E0C8194 + - 2F50A464F96DEE54FA95B5848C35E3BD method: GET - uri: https://search5091108b.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://searchbc401302.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF8" @@ -76,9 +76,9 @@ interactions: content-type: - text/plain date: - - Mon, 16 Mar 2020 17:46:17 GMT + - Thu, 19 Mar 2020 00:14:18 GMT elapsed-time: - - '53' + - '22' expires: - '-1' odata-version: @@ -88,7 +88,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 099ff668-67ae-11ea-9335-8c8590507855 + - 934ae89a-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -108,9 +108,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - AC3A707262B0134A190F5DAD3E0C8194 + - 2F50A464F96DEE54FA95B5848C35E3BD method: GET - uri: https://search5091108b.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview + uri: https://searchbc401302.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview response: body: string: '' @@ -120,15 +120,15 @@ interactions: content-length: - '0' date: - - Mon, 16 Mar 2020 17:46:17 GMT + - Thu, 19 Mar 2020 00:14:18 GMT elapsed-time: - - '12' + - '17' expires: - '-1' pragma: - no-cache request-id: - - 09b4be68-67ae-11ea-9335-8c8590507855 + - 935704fe-6976-11ea-bb71-8c8590507855 strict-transport-security: - 
max-age=15724800; includeSubDomains status: @@ -146,9 +146,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - AC3A707262B0134A190F5DAD3E0C8194 + - 2F50A464F96DEE54FA95B5848C35E3BD method: GET - uri: https://search5091108b.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://searchbc401302.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '' @@ -158,15 +158,15 @@ interactions: content-length: - '0' date: - - Mon, 16 Mar 2020 17:46:17 GMT + - Thu, 19 Mar 2020 00:14:18 GMT elapsed-time: - - '8' + - '4' expires: - '-1' pragma: - no-cache request-id: - - 09bd51e0-67ae-11ea-9335-8c8590507855 + - 93605090-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains status: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_delete_documents_missing.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_delete_documents_missing.yaml similarity index 82% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_delete_documents_missing.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_delete_documents_missing.yaml index a3c4285eb25a0..345f9b6d835dc 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_delete_documents_missing.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_delete_documents_missing.yaml @@ -16,9 +16,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - F9DBBA1EE4DF2796ED08B24A65C1F688 + - C59C228A99A4C7BBEDD6ABCAF5AC4BCB method: POST - uri: https://search3ff8101a.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: 
https://searcha9301291.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"1000","status":true,"errorMessage":null,"statusCode":200},{"key":"4","status":true,"errorMessage":null,"statusCode":200}]}' @@ -30,9 +30,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:27 GMT + - Thu, 19 Mar 2020 00:14:29 GMT elapsed-time: - - '163' + - '83' expires: - '-1' odata-version: @@ -42,7 +42,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 1056f542-67ae-11ea-9335-8c8590507855 + - 9992daf0-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -62,9 +62,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - F9DBBA1EE4DF2796ED08B24A65C1F688 + - C59C228A99A4C7BBEDD6ABCAF5AC4BCB method: GET - uri: https://search3ff8101a.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://searcha9301291.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF9" @@ -76,9 +76,9 @@ interactions: content-type: - text/plain date: - - Mon, 16 Mar 2020 17:46:31 GMT + - Thu, 19 Mar 2020 00:14:32 GMT elapsed-time: - - '3' + - '5' expires: - '-1' odata-version: @@ -88,7 +88,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 12545eca-67ae-11ea-9335-8c8590507855 + - 9b842f44-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -108,9 +108,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - F9DBBA1EE4DF2796ED08B24A65C1F688 + - C59C228A99A4C7BBEDD6ABCAF5AC4BCB method: GET - uri: https://search3ff8101a.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview 
+ uri: https://searcha9301291.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview response: body: string: '' @@ -120,15 +120,15 @@ interactions: content-length: - '0' date: - - Mon, 16 Mar 2020 17:46:31 GMT + - Thu, 19 Mar 2020 00:14:32 GMT elapsed-time: - - '9' + - '5' expires: - '-1' pragma: - no-cache request-id: - - 125bf19e-67ae-11ea-9335-8c8590507855 + - 9b8ba9f4-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains status: @@ -146,9 +146,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - F9DBBA1EE4DF2796ED08B24A65C1F688 + - C59C228A99A4C7BBEDD6ABCAF5AC4BCB method: GET - uri: https://search3ff8101a.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://searcha9301291.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '' @@ -158,7 +158,7 @@ interactions: content-length: - '0' date: - - Mon, 16 Mar 2020 17:46:31 GMT + - Thu, 19 Mar 2020 00:14:32 GMT elapsed-time: - '3' expires: @@ -166,7 +166,7 @@ interactions: pragma: - no-cache request-id: - - 1263de5e-67ae-11ea-9335-8c8590507855 + - 9b930bea-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains status: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_document.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_document.yaml similarity index 89% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_get_document.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_document.yaml index b9f6f1f7e9e55..6600dfad9c7a6 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_document.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_document.yaml @@ -11,9 +11,9 @@ 
interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BE5B0A1B46FBC54DE275C60B32EA914A + - AAECA0741D6484923CBF6DC0881DB76C method: GET - uri: https://search9b020b1b.search.windows.net/indexes('drgqefsg')/docs('1')?api-version=2019-05-06-Preview + uri: https://searche6a60d92.search.windows.net/indexes('drgqefsg')/docs('1')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"1","hotelName":"Fancy Stay","description":"Best hotel in @@ -32,9 +32,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:44 GMT + - Thu, 19 Mar 2020 00:14:44 GMT elapsed-time: - - '199' + - '95' expires: - '-1' odata-version: @@ -44,7 +44,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 196e8528-67ae-11ea-9335-8c8590507855 + - a25385b8-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -64,9 +64,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BE5B0A1B46FBC54DE275C60B32EA914A + - AAECA0741D6484923CBF6DC0881DB76C method: GET - uri: https://search9b020b1b.search.windows.net/indexes('drgqefsg')/docs('2')?api-version=2019-05-06-Preview + uri: https://searche6a60d92.search.windows.net/indexes('drgqefsg')/docs('2')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"2","hotelName":"Roach Motel","description":"Cheapest hotel @@ -80,9 +80,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:44 GMT + - Thu, 19 Mar 2020 00:14:44 GMT elapsed-time: - - '3' + - '4' expires: - '-1' odata-version: @@ -92,7 +92,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 19acfac4-67ae-11ea-9335-8c8590507855 + - a27c8ada-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -112,9 
+112,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BE5B0A1B46FBC54DE275C60B32EA914A + - AAECA0741D6484923CBF6DC0881DB76C method: GET - uri: https://search9b020b1b.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview + uri: https://searche6a60d92.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"3","hotelName":"EconoStay","description":"Very popular @@ -127,9 +127,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:44 GMT + - Thu, 19 Mar 2020 00:14:44 GMT elapsed-time: - - '9' + - '7' expires: - '-1' odata-version: @@ -139,7 +139,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 19b4a04e-67ae-11ea-9335-8c8590507855 + - a283d678-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -159,9 +159,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BE5B0A1B46FBC54DE275C60B32EA914A + - AAECA0741D6484923CBF6DC0881DB76C method: GET - uri: https://search9b020b1b.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://searche6a60d92.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"4","hotelName":"Express Rooms","description":"Pretty good @@ -174,9 +174,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:44 GMT + - Thu, 19 Mar 2020 00:14:44 GMT elapsed-time: - - '4' + - '6' expires: - '-1' odata-version: @@ -186,7 +186,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 19bd124c-67ae-11ea-9335-8c8590507855 + - a28c143c-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains 
vary: @@ -206,9 +206,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BE5B0A1B46FBC54DE275C60B32EA914A + - AAECA0741D6484923CBF6DC0881DB76C method: GET - uri: https://search9b020b1b.search.windows.net/indexes('drgqefsg')/docs('5')?api-version=2019-05-06-Preview + uri: https://searche6a60d92.search.windows.net/indexes('drgqefsg')/docs('5')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"5","hotelName":"Comfy Place","description":"Another good @@ -221,7 +221,7 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:44 GMT + - Thu, 19 Mar 2020 00:14:44 GMT elapsed-time: - '3' expires: @@ -233,7 +233,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 19c48aea-67ae-11ea-9335-8c8590507855 + - a293c448-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -253,9 +253,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BE5B0A1B46FBC54DE275C60B32EA914A + - AAECA0741D6484923CBF6DC0881DB76C method: GET - uri: https://search9b020b1b.search.windows.net/indexes('drgqefsg')/docs('6')?api-version=2019-05-06-Preview + uri: https://searche6a60d92.search.windows.net/indexes('drgqefsg')/docs('6')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"6","hotelName":null,"description":"Surprisingly expensive. 
@@ -268,9 +268,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:44 GMT + - Thu, 19 Mar 2020 00:14:44 GMT elapsed-time: - - '4' + - '7' expires: - '-1' odata-version: @@ -280,7 +280,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 19cbf9f6-67ae-11ea-9335-8c8590507855 + - a29af8d0-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -300,9 +300,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BE5B0A1B46FBC54DE275C60B32EA914A + - AAECA0741D6484923CBF6DC0881DB76C method: GET - uri: https://search9b020b1b.search.windows.net/indexes('drgqefsg')/docs('7')?api-version=2019-05-06-Preview + uri: https://searche6a60d92.search.windows.net/indexes('drgqefsg')/docs('7')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"7","hotelName":"Modern Stay","description":"Modern architecture, @@ -316,9 +316,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:44 GMT + - Thu, 19 Mar 2020 00:14:44 GMT elapsed-time: - - '9' + - '4' expires: - '-1' odata-version: @@ -328,7 +328,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 19d4336e-67ae-11ea-9335-8c8590507855 + - a2a2f83c-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -348,9 +348,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BE5B0A1B46FBC54DE275C60B32EA914A + - AAECA0741D6484923CBF6DC0881DB76C method: GET - uri: https://search9b020b1b.search.windows.net/indexes('drgqefsg')/docs('8')?api-version=2019-05-06-Preview + uri: https://searche6a60d92.search.windows.net/indexes('drgqefsg')/docs('8')?api-version=2019-05-06-Preview response: body: string: 
'{"hotelId":"8","hotelName":null,"description":"Has some road noise @@ -365,9 +365,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:44 GMT + - Thu, 19 Mar 2020 00:14:44 GMT elapsed-time: - - '3' + - '10' expires: - '-1' odata-version: @@ -377,7 +377,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 19dcb8d6-67ae-11ea-9335-8c8590507855 + - a2aa49b6-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -397,9 +397,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BE5B0A1B46FBC54DE275C60B32EA914A + - AAECA0741D6484923CBF6DC0881DB76C method: GET - uri: https://search9b020b1b.search.windows.net/indexes('drgqefsg')/docs('9')?api-version=2019-05-06-Preview + uri: https://searche6a60d92.search.windows.net/indexes('drgqefsg')/docs('9')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"9","hotelName":"Secret Point Motel","description":"The @@ -428,9 +428,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:44 GMT + - Thu, 19 Mar 2020 00:14:44 GMT elapsed-time: - - '17' + - '10' expires: - '-1' odata-version: @@ -440,7 +440,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 19e46824-67ae-11ea-9335-8c8590507855 + - a2b320d6-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -460,9 +460,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BE5B0A1B46FBC54DE275C60B32EA914A + - AAECA0741D6484923CBF6DC0881DB76C method: GET - uri: https://search9b020b1b.search.windows.net/indexes('drgqefsg')/docs('10')?api-version=2019-05-06-Preview + uri: https://searche6a60d92.search.windows.net/indexes('drgqefsg')/docs('10')?api-version=2019-05-06-Preview response: 
body: string: '{"hotelId":"10","hotelName":"Countryside Hotel","description":"Save @@ -487,9 +487,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:46:44 GMT + - Thu, 19 Mar 2020 00:14:44 GMT elapsed-time: - - '8' + - '5' expires: - '-1' odata-version: @@ -499,7 +499,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 19eef35c-67ae-11ea-9335-8c8590507855 + - a2bb5d50-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_document_count.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_document_count.yaml similarity index 82% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_get_document_count.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_document_count.yaml index 3cd80b78a4b27..ee2b0c1d6d664 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_document_count.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_document_count.yaml @@ -11,9 +11,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 04EC5611BA12DE954BE4736729DDECCB + - 3D98B9FE572C1AE1E540CEF72B72AF1E method: GET - uri: https://searche6380da3.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://search40b5101a.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF10" @@ -25,9 +25,9 @@ interactions: content-type: - text/plain date: - - Mon, 16 Mar 2020 17:46:57 GMT + - Thu, 19 Mar 2020 00:14:55 GMT elapsed-time: - - '160' + - '87' expires: - '-1' odata-version: @@ -37,7 +37,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 
2150a1c2-67ae-11ea-9335-8c8590507855 + - a8eb7e76-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_document_missing.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_document_missing.yaml similarity index 73% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_get_document_missing.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_document_missing.yaml index e375beb50d3fb..638963f9955ac 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_document_missing.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_document_missing.yaml @@ -11,9 +11,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 0F08D2849C0E355713237338FC559BEA + - D594DD68270C6C893C8671BDAE97D90B method: GET - uri: https://search2db0e74.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + uri: https://search623710eb.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview response: body: string: '' @@ -23,15 +23,15 @@ interactions: content-length: - '0' date: - - Mon, 16 Mar 2020 17:47:09 GMT + - Thu, 19 Mar 2020 00:15:05 GMT elapsed-time: - - '162' + - '58' expires: - '-1' pragma: - no-cache request-id: - - 2863347a-67ae-11ea-9335-8c8590507855 + - af9eccd2-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains status: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_counts.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_counts.yaml similarity index 97% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_counts.yaml rename to 
sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_counts.yaml index 04a97db706c89..82cc58ef9c72f 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_counts.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_counts.yaml @@ -15,9 +15,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 7BC522B2EB992E5980E99F64DAAA7BCD + - 35D12307D2FB9CEECF686F4B750F3824 method: POST - uri: https://searchd8330d2d.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search30390fa4.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":0.48248714,"hotelId":"10","hotelName":"Countryside @@ -72,9 +72,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Wed, 18 Mar 2020 17:40:46 GMT + - Thu, 19 Mar 2020 00:15:17 GMT elapsed-time: - - '43' + - '111' expires: - '-1' odata-version: @@ -84,7 +84,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 990c67f4-693f-11ea-8daf-8c8590507855 + - b5f80b3e-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -108,9 +108,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 7BC522B2EB992E5980E99F64DAAA7BCD + - 35D12307D2FB9CEECF686F4B750F3824 method: POST - uri: https://searchd8330d2d.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search30390fa4.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"@odata.count":7,"value":[{"@search.score":0.48248714,"hotelId":"10","hotelName":"Countryside @@ -165,9 +165,9 @@ interactions: 
content-type: - application/json; odata.metadata=none date: - - Wed, 18 Mar 2020 17:40:46 GMT + - Thu, 19 Mar 2020 00:15:17 GMT elapsed-time: - - '16' + - '7' expires: - '-1' odata-version: @@ -177,7 +177,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 992dabee-693f-11ea-8daf-8c8590507855 + - b6235e42-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_coverage.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_coverage.yaml similarity index 97% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_coverage.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_coverage.yaml index 8b6fd108bad73..408caa14cc4cb 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_coverage.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_coverage.yaml @@ -15,9 +15,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 9AAD9788D1C58D51FDC40F5EFBB82A69 + - 3D83CCCC79A5C26C4EDB221667E2A473 method: POST - uri: https://searchf35b0ddd.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search504f1054.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":0.48248714,"hotelId":"10","hotelName":"Countryside @@ -72,9 +72,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Wed, 18 Mar 2020 17:44:15 GMT + - Thu, 19 Mar 2020 00:15:27 GMT elapsed-time: - - '215' + - '75' expires: - '-1' odata-version: @@ -84,7 +84,7 @@ interactions: preference-applied: - odata.include-annotations="*" 
request-id: - - 15fda390-6940-11ea-8134-8c8590507855 + - bc2e6fca-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -108,9 +108,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 9AAD9788D1C58D51FDC40F5EFBB82A69 + - 3D83CCCC79A5C26C4EDB221667E2A473 method: POST - uri: https://searchf35b0ddd.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search504f1054.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"@search.coverage":100.0,"value":[{"@search.score":0.48248714,"hotelId":"10","hotelName":"Countryside @@ -165,9 +165,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Wed, 18 Mar 2020 17:44:15 GMT + - Thu, 19 Mar 2020 00:15:27 GMT elapsed-time: - - '7' + - '8' expires: - '-1' odata-version: @@ -177,7 +177,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 1646da6a-6940-11ea-8134-8c8590507855 + - bc527546-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_facets_none.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_facets_none.yaml similarity index 91% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_facets_none.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_facets_none.yaml index b59946001e7e3..7b4247e095816 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_facets_none.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_facets_none.yaml @@ -15,9 +15,9 @@ interactions: User-Agent: - 
azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 86A667664C015574DE655187EEEDEC65 + - CF3994F163439E3512CF1A57161E64A8 method: POST - uri: https://search1eef0f16.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search8339118d.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":0.2423066,"hotelName":"Countryside Hotel","description":"Save @@ -39,9 +39,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:47:20 GMT + - Thu, 19 Mar 2020 00:15:39 GMT elapsed-time: - - '79' + - '102' expires: - '-1' odata-version: @@ -51,7 +51,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 2f86c3de-67ae-11ea-9335-8c8590507855 + - c392f34e-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_facets_result.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_facets_result.yaml similarity index 91% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_facets_result.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_facets_result.yaml index e5c471bcc40df..07e97be87335e 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_facets_result.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_facets_result.yaml @@ -15,9 +15,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 7A45FD42FFD7D6E8002A7E0C9B4D53AC + - 002239569160D027C911CED9D1D5BA76 method: POST - uri: 
https://search3e911005.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://searcha7c9127c.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"@search.facets":{"category":[{"count":4,"value":"Budget"},{"count":1,"value":"Luxury"}]},"value":[{"@search.score":0.2423066,"hotelName":"Countryside @@ -39,9 +39,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:47:33 GMT + - Thu, 19 Mar 2020 00:15:50 GMT elapsed-time: - - '164' + - '109' expires: - '-1' odata-version: @@ -51,7 +51,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 37088656-67ae-11ea-9335-8c8590507855 + - c9af8490-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_filter.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_filter.yaml similarity index 90% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_filter.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_filter.yaml index b51819871bf92..8775250def5fe 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_filter.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_filter.yaml @@ -16,9 +16,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 1E3A42C76F7A9C99734E2539CDFE7C21 + - 1222A85F63DE2232A7363D893716898F method: POST - uri: https://searchd7f60d17.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: 
https://search2ffc0f8e.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":0.19169211,"hotelName":"Express Rooms","description":"Pretty @@ -36,9 +36,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:47:45 GMT + - Thu, 19 Mar 2020 00:16:00 GMT elapsed-time: - - '193' + - '93' expires: - '-1' odata-version: @@ -48,7 +48,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 3e1dde0a-67ae-11ea-9335-8c8590507855 + - cfd5e300-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_simple.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_simple.yaml similarity index 96% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_simple.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_simple.yaml index afe519fae1d36..427051bb0fba1 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_search_simple.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_get_search_simple.yaml @@ -15,9 +15,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - D7D7AF1CACCD0DE9525531FB944DAFBD + - 2E2E569ABA0682E66FD3FE84A5C7A19B method: POST - uri: https://searchd83d0d1b.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search30430f92.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":0.48248714,"hotelId":"10","hotelName":"Countryside @@ -72,9 +72,9 @@ interactions: content-type: - 
application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:47:57 GMT + - Thu, 19 Mar 2020 00:16:12 GMT elapsed-time: - - '214' + - '82' expires: - '-1' odata-version: @@ -84,7 +84,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 45038b02-67ae-11ea-9335-8c8590507855 + - d6f05db4-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -108,9 +108,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - D7D7AF1CACCD0DE9525531FB944DAFBD + - 2E2E569ABA0682E66FD3FE84A5C7A19B method: POST - uri: https://searchd83d0d1b.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview + uri: https://search30430f92.search.windows.net/indexes('drgqefsg')/docs/search.post.search?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.score":1.2368374,"hotelId":"2","hotelName":"Roach @@ -141,9 +141,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:47:57 GMT + - Thu, 19 Mar 2020 00:16:12 GMT elapsed-time: - - '8' + - '6' expires: - '-1' odata-version: @@ -153,7 +153,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 4545107c-67ae-11ea-9335-8c8590507855 + - d71648c6-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_merge_documents_existing.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_merge_documents_existing.yaml similarity index 85% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_merge_documents_existing.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_merge_documents_existing.yaml index 050b6534a0266..c53ce6f084ae1 100644 --- 
a/sdk/search/azure-search-documents/tests/recordings/test_live.test_merge_documents_existing.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_merge_documents_existing.yaml @@ -16,9 +16,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 2CB1538AB8C10476CDEE1EA1E47030A6 + - 2927C642406F719329E5D042BB206CB3 method: POST - uri: https://search40dc1028.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://searchaa14129f.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"3","status":true,"errorMessage":null,"statusCode":200},{"key":"4","status":true,"errorMessage":null,"statusCode":200}]}' @@ -30,9 +30,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:48:08 GMT + - Thu, 19 Mar 2020 00:16:23 GMT elapsed-time: - - '182' + - '104' expires: - '-1' odata-version: @@ -42,7 +42,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 4bd92176-67ae-11ea-9335-8c8590507855 + - dd65af1e-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -62,9 +62,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 2CB1538AB8C10476CDEE1EA1E47030A6 + - 2927C642406F719329E5D042BB206CB3 method: GET - uri: https://search40dc1028.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://searchaa14129f.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF10" @@ -76,9 +76,9 @@ interactions: content-type: - text/plain date: - - Mon, 16 Mar 2020 17:48:11 GMT + - Thu, 19 Mar 2020 00:16:26 GMT elapsed-time: - - '56' + - '25' expires: - '-1' odata-version: @@ -88,7 +88,7 @@ interactions: 
preference-applied: - odata.include-annotations="*" request-id: - - 4dd807b2-67ae-11ea-9335-8c8590507855 + - df589ac0-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -108,9 +108,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 2CB1538AB8C10476CDEE1EA1E47030A6 + - 2927C642406F719329E5D042BB206CB3 method: GET - uri: https://search40dc1028.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview + uri: https://searchaa14129f.search.windows.net/indexes('drgqefsg')/docs('3')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"3","hotelName":"EconoStay","description":"Very popular @@ -123,9 +123,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:48:11 GMT + - Thu, 19 Mar 2020 00:16:26 GMT elapsed-time: - - '48' + - '8' expires: - '-1' odata-version: @@ -135,7 +135,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 4dec43e4-67ae-11ea-9335-8c8590507855 + - df6527c2-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -155,9 +155,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 2CB1538AB8C10476CDEE1EA1E47030A6 + - 2927C642406F719329E5D042BB206CB3 method: GET - uri: https://search40dc1028.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://searchaa14129f.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"4","hotelName":"Express Rooms","description":"Pretty good @@ -170,9 +170,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:48:11 GMT + - Thu, 19 Mar 2020 00:16:26 GMT elapsed-time: - - '4' + - '5' expires: - '-1' odata-version: @@ -182,7 +182,7 @@ 
interactions: preference-applied: - odata.include-annotations="*" request-id: - - 4dfb49d4-67ae-11ea-9335-8c8590507855 + - df6d1324-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_merge_documents_missing.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_merge_documents_missing.yaml similarity index 84% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_merge_documents_missing.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_merge_documents_missing.yaml index c1a929e96a05d..77c0e35ba7d1d 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_merge_documents_missing.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_merge_documents_missing.yaml @@ -16,9 +16,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BDFB04AB0E60475F9FECF0E0E730D8E2 + - 8FCB206B89570E9F2297DF9161D5C43E method: POST - uri: https://search30a60fb7.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://search9767122e.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"1000","status":false,"errorMessage":"Document not @@ -31,9 +31,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:48:23 GMT + - Thu, 19 Mar 2020 00:16:38 GMT elapsed-time: - - '189' + - '99' expires: - '-1' odata-version: @@ -43,7 +43,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 54b2e1ba-67ae-11ea-9335-8c8590507855 + - e6413194-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -63,9 +63,9 @@ interactions: User-Agent: - 
azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BDFB04AB0E60475F9FECF0E0E730D8E2 + - 8FCB206B89570E9F2297DF9161D5C43E method: GET - uri: https://search30a60fb7.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://search9767122e.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF10" @@ -77,9 +77,9 @@ interactions: content-type: - text/plain date: - - Mon, 16 Mar 2020 17:48:26 GMT + - Thu, 19 Mar 2020 00:16:40 GMT elapsed-time: - - '59' + - '4' expires: - '-1' odata-version: @@ -89,7 +89,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 56bf155a-67ae-11ea-9335-8c8590507855 + - e8364f98-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -109,9 +109,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BDFB04AB0E60475F9FECF0E0E730D8E2 + - 8FCB206B89570E9F2297DF9161D5C43E method: GET - uri: https://search30a60fb7.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + uri: https://search9767122e.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview response: body: string: '' @@ -121,15 +121,15 @@ interactions: content-length: - '0' date: - - Mon, 16 Mar 2020 17:48:26 GMT + - Thu, 19 Mar 2020 00:16:40 GMT elapsed-time: - - '11' + - '18' expires: - '-1' pragma: - no-cache request-id: - - 56d3eef8-67ae-11ea-9335-8c8590507855 + - e83dd416-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains status: @@ -147,9 +147,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - BDFB04AB0E60475F9FECF0E0E730D8E2 + - 8FCB206B89570E9F2297DF9161D5C43E method: GET - uri: 
https://search30a60fb7.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://search9767122e.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"4","hotelName":"Express Rooms","description":"Pretty good @@ -162,9 +162,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:48:26 GMT + - Thu, 19 Mar 2020 00:16:40 GMT elapsed-time: - - '28' + - '11' expires: - '-1' odata-version: @@ -174,7 +174,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 56dd5fa6-67ae-11ea-9335-8c8590507855 + - e8473952-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_merge_or_upload_documents.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_merge_or_upload_documents.yaml similarity index 85% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_merge_or_upload_documents.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_merge_or_upload_documents.yaml index 30d69e4fcac5f..12e77f6bb983b 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_merge_or_upload_documents.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_merge_or_upload_documents.yaml @@ -16,9 +16,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - CE1152D48C4AA22542DB5708543FE190 + - 3A5229865D32A294DD0A4643F419953E method: POST - uri: https://search50c51082.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://searchbc7412f9.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: 
'{"value":[{"key":"1000","status":true,"errorMessage":null,"statusCode":201},{"key":"4","status":true,"errorMessage":null,"statusCode":200}]}' @@ -30,9 +30,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:48:38 GMT + - Thu, 19 Mar 2020 00:16:52 GMT elapsed-time: - - '178' + - '97' expires: - '-1' odata-version: @@ -42,7 +42,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 5d9bd714-67ae-11ea-9335-8c8590507855 + - eed08666-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -62,9 +62,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - CE1152D48C4AA22542DB5708543FE190 + - 3A5229865D32A294DD0A4643F419953E method: GET - uri: https://search50c51082.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://searchbc7412f9.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF11" @@ -76,9 +76,9 @@ interactions: content-type: - text/plain date: - - Mon, 16 Mar 2020 17:48:41 GMT + - Thu, 19 Mar 2020 00:16:55 GMT elapsed-time: - - '4' + - '3' expires: - '-1' odata-version: @@ -88,7 +88,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 5f9b58b4-67ae-11ea-9335-8c8590507855 + - f0c2b2e6-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -108,9 +108,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - CE1152D48C4AA22542DB5708543FE190 + - 3A5229865D32A294DD0A4643F419953E method: GET - uri: https://search50c51082.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + uri: https://searchbc7412f9.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview response: body: 
string: '{"hotelId":"1000","hotelName":null,"description":null,"descriptionFr":null,"category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":1,"location":null,"address":null,"rooms":[]}' @@ -122,9 +122,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:48:41 GMT + - Thu, 19 Mar 2020 00:16:55 GMT elapsed-time: - - '6' + - '7' expires: - '-1' odata-version: @@ -134,7 +134,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 5fa45e5a-67ae-11ea-9335-8c8590507855 + - f0ca0aaa-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -154,9 +154,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - CE1152D48C4AA22542DB5708543FE190 + - 3A5229865D32A294DD0A4643F419953E method: GET - uri: https://search50c51082.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview + uri: https://searchbc7412f9.search.windows.net/indexes('drgqefsg')/docs('4')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"4","hotelName":"Express Rooms","description":"Pretty good @@ -169,7 +169,7 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:48:41 GMT + - Thu, 19 Mar 2020 00:16:55 GMT elapsed-time: - '6' expires: @@ -181,7 +181,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 5faddb56-67ae-11ea-9335-8c8590507855 + - f0d16868-6976-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_suggest.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_suggest.yaml similarity index 85% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_suggest.yaml rename to 
sdk/search/azure-search-documents/tests/recordings/test_index_live.test_suggest.yaml index c8b7f2d35cfcb..2c4ccb8120ecd 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_suggest.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_suggest.yaml @@ -15,9 +15,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 8EF6EC1005F0C2673FA32DD19DBD1F74 + - 46ABAB8B64DEF7BC3124DCE5FC2CF36D method: POST - uri: https://search687b091f.search.windows.net/indexes('drgqefsg')/docs/search.post.suggest?api-version=2019-05-06-Preview + uri: https://searcha7cc0b96.search.windows.net/indexes('drgqefsg')/docs/search.post.suggest?api-version=2019-05-06-Preview response: body: string: '{"value":[{"@search.text":"Cheapest hotel in town. Infact, a motel.","hotelId":"2"},{"@search.text":"Secret @@ -30,9 +30,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:48:53 GMT + - Thu, 19 Mar 2020 00:17:27 GMT elapsed-time: - - '201' + - '84' expires: - '-1' odata-version: @@ -42,7 +42,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 6654f71e-67ae-11ea-9335-8c8590507855 + - 03fd7f08-6977-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_upload_documents_existing.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_upload_documents_existing.yaml similarity index 87% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_upload_documents_existing.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_upload_documents_existing.yaml index 85117e5e04d97..eb8127250114a 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_upload_documents_existing.yaml +++ 
b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_upload_documents_existing.yaml @@ -17,9 +17,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 6E884F2B6E53623D1EF7449BE07689F4 + - D63A7EE90E66D753E9C5392ADBCC52B1 method: POST - uri: https://search527b109d.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://searchbe2a1314.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"1000","status":true,"errorMessage":null,"statusCode":201},{"key":"3","status":true,"errorMessage":null,"statusCode":200}]}' @@ -31,9 +31,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:49:04 GMT + - Thu, 19 Mar 2020 00:18:07 GMT elapsed-time: - - '147' + - '123' expires: - '-1' odata-version: @@ -43,7 +43,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 6d539386-67ae-11ea-9335-8c8590507855 + - 1b5b4388-6977-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_upload_documents_new.yaml b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_upload_documents_new.yaml similarity index 81% rename from sdk/search/azure-search-documents/tests/recordings/test_live.test_upload_documents_new.yaml rename to sdk/search/azure-search-documents/tests/recordings/test_index_live.test_upload_documents_new.yaml index cf02f84d4d490..beffa8a1c9fcb 100644 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_upload_documents_new.yaml +++ b/sdk/search/azure-search-documents/tests/recordings/test_index_live.test_upload_documents_new.yaml @@ -17,9 +17,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 
(Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 27498E40CE6D7581778C1EEA658A1629 + - 9E733832432E2AF10FA4983DDAB03F52 method: POST - uri: https://search3990e7c.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview + uri: https://search62f510f3.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06-Preview response: body: string: '{"value":[{"key":"1000","status":true,"errorMessage":null,"statusCode":201},{"key":"1001","status":true,"errorMessage":null,"statusCode":201}]}' @@ -31,9 +31,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:49:16 GMT + - Thu, 19 Mar 2020 00:18:17 GMT elapsed-time: - - '192' + - '82' expires: - '-1' odata-version: @@ -43,7 +43,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 7450927e-67ae-11ea-9335-8c8590507855 + - 219f5cca-6977-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -63,9 +63,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 27498E40CE6D7581778C1EEA658A1629 + - 9E733832432E2AF10FA4983DDAB03F52 method: GET - uri: https://search3990e7c.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview + uri: https://search62f510f3.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06-Preview response: body: string: "\uFEFF12" @@ -77,9 +77,9 @@ interactions: content-type: - text/plain date: - - Mon, 16 Mar 2020 17:49:19 GMT + - Thu, 19 Mar 2020 00:18:20 GMT elapsed-time: - - '45' + - '23' expires: - '-1' odata-version: @@ -89,7 +89,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 7651a590-67ae-11ea-9335-8c8590507855 + - 23918ef4-6977-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -109,9 +109,9 @@ interactions: User-Agent: - 
azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 27498E40CE6D7581778C1EEA658A1629 + - 9E733832432E2AF10FA4983DDAB03F52 method: GET - uri: https://search3990e7c.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview + uri: https://search62f510f3.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"1000","hotelName":"Azure Inn","description":null,"descriptionFr":null,"category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":5,"location":null,"address":null,"rooms":[]}' @@ -123,9 +123,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:49:19 GMT + - Thu, 19 Mar 2020 00:18:20 GMT elapsed-time: - - '14' + - '9' expires: - '-1' odata-version: @@ -135,7 +135,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 7663227a-67ae-11ea-9335-8c8590507855 + - 239dc840-6977-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: @@ -155,9 +155,9 @@ interactions: User-Agent: - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) api-key: - - 27498E40CE6D7581778C1EEA658A1629 + - 9E733832432E2AF10FA4983DDAB03F52 method: GET - uri: https://search3990e7c.search.windows.net/indexes('drgqefsg')/docs('1001')?api-version=2019-05-06-Preview + uri: https://search62f510f3.search.windows.net/indexes('drgqefsg')/docs('1001')?api-version=2019-05-06-Preview response: body: string: '{"hotelId":"1001","hotelName":"Redmond Hotel","description":null,"descriptionFr":null,"category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":4,"location":null,"address":null,"rooms":[]}' @@ -169,9 +169,9 @@ interactions: content-type: - application/json; odata.metadata=none date: - - Mon, 16 Mar 2020 17:49:19 GMT + - Thu, 19 Mar 2020 00:18:20 GMT 
elapsed-time: - - '5' + - '16' expires: - '-1' odata-version: @@ -181,7 +181,7 @@ interactions: preference-applied: - odata.include-annotations="*" request-id: - - 766e3ea8-67ae-11ea-9335-8c8590507855 + - 23a6ab7c-6977-11ea-bb71-8c8590507855 strict-transport-security: - max-age=15724800; includeSubDomains vary: diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_basic.yaml b/sdk/search/azure-search-documents/tests/recordings/test_live.test_basic.yaml deleted file mode 100644 index f9786dfca4f1d..0000000000000 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_basic.yaml +++ /dev/null @@ -1,48 +0,0 @@ -interactions: -- request: - body: null - headers: - Accept: - - application/json;odata.metadata=none - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - User-Agent: - - azsdk-python-core/1.2.3 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) azsdk-python-searchindexclient/unknown - api-key: - - CD6D4088F021D9EDF7DFB3DBD9BF207D - method: GET - uri: https://test-service-name.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06 - response: - body: - string: "\uFEFF10" - headers: - cache-control: - - no-cache - content-length: - - '5' - content-type: - - text/plain - date: - - Thu, 05 Mar 2020 17:48:42 GMT - elapsed-time: - - '63' - expires: - - '-1' - odata-version: - - '4.0' - pragma: - - no-cache - preference-applied: - - odata.include-annotations="*" - request-id: - - 8d76ef08-5f09-11ea-b10a-8c8590507855 - strict-transport-security: - - max-age=15724800; includeSubDomains - vary: - - Accept-Encoding - status: - code: 200 - message: OK -version: 1 diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_autocomplete.yaml b/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_autocomplete.yaml deleted file mode 100644 index 3d46c0d2a5924..0000000000000 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_autocomplete.yaml 
+++ /dev/null @@ -1,52 +0,0 @@ -interactions: -- request: - body: '{"search": "mot", "suggesterName": "sg"}' - headers: - Accept: - - application/json;odata.metadata=none - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '40' - Content-Type: - - application/json - User-Agent: - - azsdk-python-core/1.2.3 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) azsdk-python-searchindexclient/unknown - api-key: - - 6CF152470BA4BF2B1A1ED6A61D8F5945 - method: POST - uri: https://searchcc080cce.search.windows.net/indexes('drgqefsg')/docs/search.post.autocomplete?api-version=2019-05-06 - response: - body: - string: '{"value":[{"text":"motel","queryPlusText":"motel"}]}' - headers: - cache-control: - - no-cache - content-length: - - '52' - content-type: - - application/json; odata.metadata=none - date: - - Thu, 05 Mar 2020 20:22:33 GMT - elapsed-time: - - '16' - expires: - - '-1' - odata-version: - - '4.0' - pragma: - - no-cache - preference-applied: - - odata.include-annotations="*" - request-id: - - 0bdb9fdc-5f1f-11ea-8316-8c8590507855 - strict-transport-security: - - max-age=15724800; includeSubDomains - vary: - - Accept-Encoding - status: - code: 200 - message: OK -version: 1 diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_autocomplete_simple.yaml b/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_autocomplete_simple.yaml deleted file mode 100644 index 48831785afb35..0000000000000 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_autocomplete_simple.yaml +++ /dev/null @@ -1,52 +0,0 @@ -interactions: -- request: - body: '{"search": "Bud", "suggesterName": "sg"}' - headers: - Accept: - - application/json;odata.metadata=none - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '40' - Content-Type: - - application/json - User-Agent: - - azsdk-python-core/1.2.3 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) 
azsdk-python-searchindexclient/unknown - api-key: - - 2B86FA083D1F82F305D6A73D60077FDA - method: POST - uri: https://search31520fb7.search.windows.net/indexes('drgqefsg')/docs/search.post.autocomplete?api-version=2019-05-06 - response: - body: - string: '{"value":[]}' - headers: - cache-control: - - no-cache - content-length: - - '12' - content-type: - - application/json; odata.metadata=none - date: - - Thu, 05 Mar 2020 20:20:24 GMT - elapsed-time: - - '143' - expires: - - '-1' - odata-version: - - '4.0' - pragma: - - no-cache - preference-applied: - - odata.include-annotations="*" - request-id: - - bed40fc6-5f1e-11ea-a115-8c8590507855 - strict-transport-security: - - max-age=15724800; includeSubDomains - vary: - - Accept-Encoding - status: - code: 200 - message: OK -version: 1 diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_suggest.yaml b/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_suggest.yaml deleted file mode 100644 index 83d80c039cddd..0000000000000 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_get_suggest.yaml +++ /dev/null @@ -1,53 +0,0 @@ -interactions: -- request: - body: '{"search": "mot", "suggesterName": "sg"}' - headers: - Accept: - - application/json;odata.metadata=none - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '40' - Content-Type: - - application/json - User-Agent: - - azsdk-python-core/1.2.3 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) azsdk-python-searchindexclient/unknown - api-key: - - 0CBB4FBBC5AC9CC7D2E5ED9B5299845D - method: POST - uri: https://search905a0abe.search.windows.net/indexes('drgqefsg')/docs/search.post.suggest?api-version=2019-05-06 - response: - body: - string: '{"value":[{"@search.text":"Cheapest hotel in town. 
Infact, a motel.","hotelId":"2"},{"@search.text":"Secret - Point Motel","hotelId":"9"}]}' - headers: - cache-control: - - no-cache - content-length: - - '137' - content-type: - - application/json; odata.metadata=none - date: - - Thu, 05 Mar 2020 20:23:41 GMT - elapsed-time: - - '138' - expires: - - '-1' - odata-version: - - '4.0' - pragma: - - no-cache - preference-applied: - - odata.include-annotations="*" - request-id: - - 34485c62-5f1f-11ea-9ab6-8c8590507855 - strict-transport-security: - - max-age=15724800; includeSubDomains - vary: - - Accept-Encoding - status: - code: 200 - message: OK -version: 1 diff --git a/sdk/search/azure-search-documents/tests/recordings/test_live.test_upload_documents.yaml b/sdk/search/azure-search-documents/tests/recordings/test_live.test_upload_documents.yaml deleted file mode 100644 index 4a882462a2473..0000000000000 --- a/sdk/search/azure-search-documents/tests/recordings/test_live.test_upload_documents.yaml +++ /dev/null @@ -1,192 +0,0 @@ -interactions: -- request: - body: '{"value": [{"hotelId": "1000", "rating": 5, "rooms": [], "hotelName": "Azure - Inn", "@search.action": "upload"}, {"hotelId": "1001", "rating": 4, "rooms": - [], "hotelName": "Redmond Hotel", "@search.action": "upload"}]}' - headers: - Accept: - - application/json;odata.metadata=none - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '217' - Content-Type: - - application/json - User-Agent: - - azsdk-python-core/1.2.3 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) azsdk-python-searchindexclient/unknown - api-key: - - 48DA7A245D1D909C2DDD5B913A209CA3 - method: POST - uri: https://searchcc370cd3.search.windows.net/indexes('drgqefsg')/docs/search.index?api-version=2019-05-06 - response: - body: - string: '{"value":[{"key":"1000","status":true,"errorMessage":null,"statusCode":201},{"key":"1001","status":true,"errorMessage":null,"statusCode":201}]}' - headers: - cache-control: - - no-cache - content-length: - - '143' - 
content-type: - - application/json; odata.metadata=none - date: - - Thu, 05 Mar 2020 19:51:20 GMT - elapsed-time: - - '30' - expires: - - '-1' - odata-version: - - '4.0' - pragma: - - no-cache - preference-applied: - - odata.include-annotations="*" - request-id: - - af2e880c-5f1a-11ea-8824-8c8590507855 - strict-transport-security: - - max-age=15724800; includeSubDomains - vary: - - Accept-Encoding - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json;odata.metadata=none - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - User-Agent: - - azsdk-python-core/1.2.3 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) azsdk-python-searchindexclient/unknown - api-key: - - 48DA7A245D1D909C2DDD5B913A209CA3 - method: GET - uri: https://searchcc370cd3.search.windows.net/indexes('drgqefsg')/docs/$count?api-version=2019-05-06 - response: - body: - string: "\uFEFF12" - headers: - cache-control: - - no-cache - content-length: - - '5' - content-type: - - text/plain - date: - - Thu, 05 Mar 2020 19:51:23 GMT - elapsed-time: - - '6' - expires: - - '-1' - odata-version: - - '4.0' - pragma: - - no-cache - preference-applied: - - odata.include-annotations="*" - request-id: - - b117c9da-5f1a-11ea-8824-8c8590507855 - strict-transport-security: - - max-age=15724800; includeSubDomains - vary: - - Accept-Encoding - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json;odata.metadata=none - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - User-Agent: - - azsdk-python-core/1.2.3 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) azsdk-python-searchindexclient/unknown - api-key: - - 48DA7A245D1D909C2DDD5B913A209CA3 - method: GET - uri: https://searchcc370cd3.search.windows.net/indexes('drgqefsg')/docs('1000')?api-version=2019-05-06 - response: - body: - string: '{"hotelId":"1000","hotelName":"Azure 
Inn","description":null,"descriptionFr":null,"category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":5,"location":null,"address":null,"rooms":[]}' - headers: - cache-control: - - no-cache - content-length: - - '232' - content-type: - - application/json; odata.metadata=none - date: - - Thu, 05 Mar 2020 19:51:23 GMT - elapsed-time: - - '8' - expires: - - '-1' - odata-version: - - '4.0' - pragma: - - no-cache - preference-applied: - - odata.include-annotations="*" - request-id: - - b11fea20-5f1a-11ea-8824-8c8590507855 - strict-transport-security: - - max-age=15724800; includeSubDomains - vary: - - Accept-Encoding - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json;odata.metadata=none - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - User-Agent: - - azsdk-python-core/1.2.3 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) azsdk-python-searchindexclient/unknown - api-key: - - 48DA7A245D1D909C2DDD5B913A209CA3 - method: GET - uri: https://searchcc370cd3.search.windows.net/indexes('drgqefsg')/docs('1001')?api-version=2019-05-06 - response: - body: - string: '{"hotelId":"1001","hotelName":"Redmond Hotel","description":null,"descriptionFr":null,"category":null,"tags":[],"parkingIncluded":null,"smokingAllowed":null,"lastRenovationDate":null,"rating":4,"location":null,"address":null,"rooms":[]}' - headers: - cache-control: - - no-cache - content-length: - - '236' - content-type: - - application/json; odata.metadata=none - date: - - Thu, 05 Mar 2020 19:51:23 GMT - elapsed-time: - - '3' - expires: - - '-1' - odata-version: - - '4.0' - pragma: - - no-cache - preference-applied: - - odata.include-annotations="*" - request-id: - - b1282776-5f1a-11ea-8824-8c8590507855 - strict-transport-security: - - max-age=15724800; includeSubDomains - vary: - - Accept-Encoding - status: - code: 200 - message: OK -version: 1 diff --git 
a/sdk/search/azure-search-documents/tests/recordings/test_service_live.test_get_service_statistics.yaml b/sdk/search/azure-search-documents/tests/recordings/test_service_live.test_get_service_statistics.yaml new file mode 100644 index 0000000000000..ce91e406d0fc7 --- /dev/null +++ b/sdk/search/azure-search-documents/tests/recordings/test_service_live.test_get_service_statistics.yaml @@ -0,0 +1,48 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json;odata.metadata=minimal + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-search/1.0.0b2 Python/3.7.3 (Darwin-19.3.0-x86_64-i386-64bit) + api-key: + - E505C9497247847B7216439F90BA1752 + method: GET + uri: https://searchabe712a7.search.windows.net/servicestats?api-version=2019-05-06-Preview + response: + body: + string: '{"@odata.context":"https://searchabe712a7.search.windows.net/$metadata#Microsoft.Azure.Search.V2019_05_06_Preview.ServiceStatistics","counters":{"documentCount":{"usage":0,"quota":null},"indexesCount":{"usage":0,"quota":3},"indexersCount":{"usage":0,"quota":3},"dataSourcesCount":{"usage":0,"quota":3},"storageSize":{"usage":0,"quota":52428800},"synonymMaps":{"usage":0,"quota":3},"skillsetCount":{"usage":0,"quota":3}},"limits":{"maxFieldsPerIndex":1000,"maxFieldNestingDepthPerIndex":10,"maxComplexCollectionFieldsPerIndex":40,"maxComplexObjectsInCollectionsPerDocument":3000}}' + headers: + cache-control: + - no-cache + content-length: + - '579' + content-type: + - application/json; odata.metadata=minimal + date: + - Thu, 19 Mar 2020 19:50:54 GMT + elapsed-time: + - '42' + expires: + - '-1' + odata-version: + - '4.0' + pragma: + - no-cache + preference-applied: + - odata.include-annotations="*" + request-id: + - f116caa8-6a1a-11ea-8e1b-8c8590507855 + strict-transport-security: + - max-age=15724800; includeSubDomains + vary: + - Accept-Encoding + status: + code: 200 + message: OK +version: 1 diff --git 
a/sdk/search/azure-search-documents/tests/search_service_preparer.py b/sdk/search/azure-search-documents/tests/search_service_preparer.py index 3e3dcde24e41d..913cd2308e402 100644 --- a/sdk/search/azure-search-documents/tests/search_service_preparer.py +++ b/sdk/search/azure-search-documents/tests/search_service_preparer.py @@ -24,7 +24,7 @@ class SearchServicePreparer(AzureMgmtPreparer): def __init__( self, - schema, + schema=None, index_batch=None, name_prefix="search", resource_group_parameter_name=RESOURCE_GROUP_PARAM, @@ -56,14 +56,17 @@ def _get_resource_group(self, **kwargs): raise AzureTestError(template.format(ResourceGroupPreparer.__name__)) def create_resource(self, name, **kwargs): - schema = json.loads(self.schema) + if self.schema: + schema = json.loads(self.schema) + else: + schema = None self.service_name = self.create_random_name() self.endpoint = "https://{}.search.windows.net".format(self.service_name) if not self.is_live: return { "api_key": "api-key", - "index_name": schema["name"], + "index_name": schema["name"] if schema else None, "endpoint": self.endpoint, } @@ -103,19 +106,20 @@ def create_resource(self, name, **kwargs): group_name, self.service_name ).primary_key - response = requests.post( - SERVICE_URL_FMT.format(self.service_name), - headers={"Content-Type": "application/json", "api-key": api_key}, - data=self.schema, - ) - if response.status_code != 201: - raise AzureTestError( - "Could not create a search index {}".format(response.status_code) + if self.schema: + response = requests.post( + SERVICE_URL_FMT.format(self.service_name), + headers={"Content-Type": "application/json", "api-key": api_key}, + data=self.schema, ) - self.index_name = schema["name"] + if response.status_code != 201: + raise AzureTestError( + "Could not create a search index {}".format(response.status_code) + ) + self.index_name = schema["name"] # optionally load data into the index - if self.index_batch: + if self.index_batch and self.schema: from 
azure.search.documents import SearchIndexClient, SearchApiKeyCredential from azure.search.documents._index._generated.models import IndexBatch diff --git a/sdk/search/azure-search-documents/tests/test_live.py b/sdk/search/azure-search-documents/tests/test_index_live.py similarity index 95% rename from sdk/search/azure-search-documents/tests/test_live.py rename to sdk/search/azure-search-documents/tests/test_index_live.py index f82ce66bb580e..da12b8d721be7 100644 --- a/sdk/search/azure-search-documents/tests/test_live.py +++ b/sdk/search/azure-search-documents/tests/test_index_live.py @@ -153,7 +153,12 @@ def test_get_search_facets_result(self, api_key, endpoint, index_name, **kwargs) query.select("hotelName", "category", "description") results = client.search(query=query) - assert results.get_facets() == {'category': [{'value': 'Budget', 'count': 4}, {'value': 'Luxury', 'count': 1}]} + assert results.get_facets() == { + "category": [ + {"value": "Budget", "count": 4}, + {"value": "Luxury", "count": 1}, + ] + } @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) @@ -205,9 +210,7 @@ def test_upload_documents_new(self, api_key, endpoint, index_name, **kwargs): @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) - def test_upload_documents_existing( - self, api_key, endpoint, index_name, **kwargs - ): + def test_upload_documents_existing(self, api_key, endpoint, index_name, **kwargs): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) @@ -221,9 +224,7 @@ def test_upload_documents_existing( @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) - def test_delete_documents_existing( - self, api_key, endpoint, index_name, **kwargs - ): + def test_delete_documents_existing(self, api_key, endpoint, index_name, **kwargs): client = SearchIndexClient( endpoint, index_name, 
SearchApiKeyCredential(api_key) ) @@ -244,9 +245,7 @@ def test_delete_documents_existing( @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) - def test_delete_documents_missing( - self, api_key, endpoint, index_name, **kwargs - ): + def test_delete_documents_missing(self, api_key, endpoint, index_name, **kwargs): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) @@ -267,9 +266,7 @@ def test_delete_documents_missing( @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) - def test_merge_documents_existing( - self, api_key, endpoint, index_name, **kwargs - ): + def test_merge_documents_existing(self, api_key, endpoint, index_name, **kwargs): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) @@ -315,9 +312,7 @@ def test_merge_documents_missing(self, api_key, endpoint, index_name, **kwargs): @ResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) - def test_merge_or_upload_documents( - self, api_key, endpoint, index_name, **kwargs - ): + def test_merge_or_upload_documents(self, api_key, endpoint, index_name, **kwargs): client = SearchIndexClient( endpoint, index_name, SearchApiKeyCredential(api_key) ) diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client.py b/sdk/search/azure-search-documents/tests/test_search_index_client.py index d49716cb9fc5b..26eaefafa6adf 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client.py @@ -43,7 +43,6 @@ zip(CRUD_METHOD_NAMES, ["upload", "delete", "merge", "mergeOrUpload"]) ) -ENDPOINT = "https://" class Test_odata(object): def test_const(self): diff --git a/sdk/search/azure-search-documents/tests/test_search_service_client.py b/sdk/search/azure-search-documents/tests/test_search_service_client.py 
new file mode 100644 index 0000000000000..061dc27cc7c10 --- /dev/null +++ b/sdk/search/azure-search-documents/tests/test_search_service_client.py @@ -0,0 +1,40 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import pytest + +try: + from unittest import mock +except ImportError: + import mock + +from azure.search.documents import SearchApiKeyCredential, SearchServiceClient + +CREDENTIAL = SearchApiKeyCredential(api_key="test_api_key") + + +class TestSearchServiceClient(object): + def test_init(self): + client = SearchServiceClient("endpoint", CREDENTIAL) + assert client._client._config.headers_policy.headers == { + "api-key": "test_api_key", + "Accept": "application/json;odata.metadata=minimal", + } + + def test_repr(self): + client = SearchServiceClient("endpoint", CREDENTIAL) + assert repr(client) == "".format( + repr("endpoint") + ) + + @mock.patch( + "azure.search.documents._service._generated._search_service_client.SearchServiceClient.get_service_statistics" + ) + def test_get_service_statistics(self, mock_get_stats): + client = SearchServiceClient("endpoint", CREDENTIAL) + client.get_service_statistics() + assert mock_get_stats.called + assert mock_get_stats.call_args[0] == () + assert mock_get_stats.call_args[1] == {} diff --git a/sdk/search/azure-search-documents/tests/test_service_live.py b/sdk/search/azure-search-documents/tests/test_service_live.py new file mode 100644 index 0000000000000..18364053cc54c --- /dev/null +++ b/sdk/search/azure-search-documents/tests/test_service_live.py @@ -0,0 +1,27 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +import json +from os.path import dirname, join, realpath +import time + +import pytest + +from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer + +from search_service_preparer import SearchServicePreparer + +from azure.core.exceptions import HttpResponseError +from azure.search.documents import SearchServiceClient, SearchApiKeyCredential + + +class SearchIndexClientTest(AzureMgmtTestCase): + @ResourceGroupPreparer(random_name_enabled=True) + @SearchServicePreparer() + def test_get_service_statistics(self, api_key, endpoint, **kwargs): + client = SearchServiceClient(endpoint, SearchApiKeyCredential(api_key)) + result = client.get_service_statistics() + assert isinstance(result, dict) + assert set(result.keys()) == {"counters", "limits"}