diff --git a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md index f05e021f850a4..f08ca18b2e5a3 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md +++ b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md @@ -5,6 +5,9 @@ - `score` attribute has been renamed to `confidence_score` for the `CategorizedEntity`, `LinkedEntityMatch`, and `PiiEntity` models +**Breaking changes** +- All input parameters `inputs` have been renamed to `documents` + ## 1.0.0b3 (2020-03-10) diff --git a/sdk/textanalytics/azure-ai-textanalytics/README.md b/sdk/textanalytics/azure-ai-textanalytics/README.md index 7072d674ec6dd..d600984f88cc1 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/README.md +++ b/sdk/textanalytics/azure-ai-textanalytics/README.md @@ -138,7 +138,7 @@ The input for each operation is passed as a **list** of documents. Each document can be passed as a string in the list, e.g. ```python -docs = ["I hated the movie. It was so slow!", "The movie made it into my top ten favorites.", "What a great movie!"] +documents = ["I hated the movie. It was so slow!", "The movie made it into my top ten favorites.", "What a great movie!"] ``` or, if you wish to pass in a per-item document `id` or `language`/`country_hint`, they can be passed as a list of @@ -147,7 +147,7 @@ or, if you wish to pass in a per-item document `id` or `language`/`country_hint` or a dict-like representation of the object: ```python -inputs = [ +documents = [ {"id": "1", "language": "en", "text": "I hated the movie. It was so slow!"}, {"id": "2", "language": "en", "text": "The movie made it into my top ten favorites."}, {"id": "3", "language": "en", "text": "What a great movie!"} diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py index 221d0fbc9b054..bf54aa5c8b754 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py @@ -95,7 +95,7 @@ def __init__(self, endpoint, credential, **kwargs): @distributed_trace def detect_language( # type: ignore self, - inputs, # type: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]] + documents, # type: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]] **kwargs # type: Any ): # type: (...) -> List[Union[DetectLanguageResult, DocumentError]] @@ -108,12 +108,12 @@ def detect_language( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and country_hint on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.DetectLanguageInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.DetectLanguageInput`, like `{"id": "1", "country_hint": "us", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.DetectLanguageInput] :keyword str country_hint: A country hint for the entire batch. Accepts two letter country codes specified by ISO 3166-1 alpha-2. 
Per-document @@ -142,7 +142,7 @@ def detect_language( # type: ignore """ country_hint_arg = kwargs.pop("country_hint", None) country_hint = country_hint_arg if country_hint_arg is not None else self._default_country_hint - docs = _validate_batch_input(inputs, "country_hint", country_hint) + docs = _validate_batch_input(documents, "country_hint", country_hint) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: @@ -159,7 +159,7 @@ def detect_language( # type: ignore @distributed_trace def recognize_entities( # type: ignore self, - inputs, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] + documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] **kwargs # type: Any ): # type: (...) -> List[Union[RecognizeEntitiesResult, DocumentError]] @@ -172,12 +172,12 @@ def recognize_entities( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and language on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like `{"id": "1", "language": "en", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.TextDocumentInput] :keyword str language: The 2 letter ISO 639-1 representation of language for the entire batch. For example, use "en" for English; "es" for Spanish etc. @@ -206,7 +206,7 @@ def recognize_entities( # type: ignore """ language_arg = kwargs.pop("language", None) language = language_arg if language_arg is not None else self._default_language - docs = _validate_batch_input(inputs, "language", language) + docs = _validate_batch_input(documents, "language", language) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: @@ -223,7 +223,7 @@ def recognize_entities( # type: ignore @distributed_trace def recognize_pii_entities( # type: ignore self, - inputs, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] + documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] **kwargs # type: Any ): # type: (...) -> List[Union[RecognizePiiEntitiesResult, DocumentError]] @@ -236,12 +236,12 @@ def recognize_pii_entities( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and language on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like `{"id": "1", "language": "en", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.TextDocumentInput] :keyword str language: The 2 letter ISO 639-1 representation of language for the entire batch. For example, use "en" for English; "es" for Spanish etc. 
@@ -270,7 +270,7 @@ def recognize_pii_entities( # type: ignore """ language_arg = kwargs.pop("language", None) language = language_arg if language_arg is not None else self._default_language - docs = _validate_batch_input(inputs, "language", language) + docs = _validate_batch_input(documents, "language", language) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: @@ -287,7 +287,7 @@ def recognize_pii_entities( # type: ignore @distributed_trace def recognize_linked_entities( # type: ignore self, - inputs, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] + documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] **kwargs # type: Any ): # type: (...) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]] @@ -301,12 +301,12 @@ def recognize_linked_entities( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and language on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like `{"id": "1", "language": "en", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.TextDocumentInput] :keyword str language: The 2 letter ISO 639-1 representation of language for the entire batch. For example, use "en" for English; "es" for Spanish etc. @@ -335,7 +335,7 @@ def recognize_linked_entities( # type: ignore """ language_arg = kwargs.pop("language", None) language = language_arg if language_arg is not None else self._default_language - docs = _validate_batch_input(inputs, "language", language) + docs = _validate_batch_input(documents, "language", language) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: @@ -352,7 +352,7 @@ def recognize_linked_entities( # type: ignore @distributed_trace def extract_key_phrases( # type: ignore self, - inputs, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] + documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] **kwargs # type: Any ): # type: (...) -> List[Union[ExtractKeyPhrasesResult, DocumentError]] @@ -366,12 +366,12 @@ def extract_key_phrases( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and language on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like `{"id": "1", "language": "en", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.TextDocumentInput] :keyword str language: The 2 letter ISO 639-1 representation of language for the entire batch. For example, use "en" for English; "es" for Spanish etc. 
@@ -400,7 +400,7 @@ def extract_key_phrases( # type: ignore """ language_arg = kwargs.pop("language", None) language = language_arg if language_arg is not None else self._default_language - docs = _validate_batch_input(inputs, "language", language) + docs = _validate_batch_input(documents, "language", language) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: @@ -417,7 +417,7 @@ def extract_key_phrases( # type: ignore @distributed_trace def analyze_sentiment( # type: ignore self, - inputs, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] + documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] **kwargs # type: Any ): # type: (...) -> List[Union[AnalyzeSentimentResult, DocumentError]] @@ -430,12 +430,12 @@ def analyze_sentiment( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and language on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like `{"id": "1", "language": "en", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.TextDocumentInput] :keyword str language: The 2 letter ISO 639-1 representation of language for the entire batch. For example, use "en" for English; "es" for Spanish etc. @@ -464,7 +464,7 @@ def analyze_sentiment( # type: ignore """ language_arg = kwargs.pop("language", None) language = language_arg if language_arg is not None else self._default_language - docs = _validate_batch_input(inputs, "language", language) + docs = _validate_batch_input(documents, "language", language) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py index 59fccb1b5a858..adab4ac9a3a34 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py @@ -99,7 +99,7 @@ def __init__( # type: ignore @distributed_trace_async async def detect_language( # type: ignore self, - inputs: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]], + documents: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]], **kwargs: Any ) -> List[Union[DetectLanguageResult, DocumentError]]: """Detects Language for a batch of documents. @@ -111,12 +111,12 @@ async def detect_language( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. 
If you wish to specify the ID and country_hint on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.DetectLanguageInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.DetectLanguageInput`, like `{"id": "1", "country_hint": "us", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.DetectLanguageInput] :keyword str country_hint: A country hint for the entire batch. Accepts two letter country codes specified by ISO 3166-1 alpha-2. Per-document @@ -145,7 +145,7 @@ async def detect_language( # type: ignore """ country_hint_arg = kwargs.pop("country_hint", None) country_hint = country_hint_arg if country_hint_arg is not None else self._default_country_hint - docs = _validate_batch_input(inputs, "country_hint", country_hint) + docs = _validate_batch_input(documents, "country_hint", country_hint) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: @@ -162,7 +162,7 @@ async def detect_language( # type: ignore @distributed_trace_async async def recognize_entities( # type: ignore self, - inputs: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]], + documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]], **kwargs: Any ) -> List[Union[RecognizeEntitiesResult, DocumentError]]: """Entity Recognition for a batch of documents. @@ -174,12 +174,12 @@ async def recognize_entities( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and language on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like `{"id": "1", "language": "en", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.TextDocumentInput] :keyword str language: The 2 letter ISO 639-1 representation of language for the entire batch. For example, use "en" for English; "es" for Spanish etc. @@ -208,7 +208,7 @@ async def recognize_entities( # type: ignore """ language_arg = kwargs.pop("language", None) language = language_arg if language_arg is not None else self._default_language - docs = _validate_batch_input(inputs, "language", language) + docs = _validate_batch_input(documents, "language", language) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: @@ -225,7 +225,7 @@ async def recognize_entities( # type: ignore @distributed_trace_async async def recognize_pii_entities( # type: ignore self, - inputs: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]], + documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]], **kwargs: Any ) -> List[Union[RecognizePiiEntitiesResult, DocumentError]]: """Recognize entities containing personal information for a batch of documents. @@ -237,12 +237,12 @@ async def recognize_pii_entities( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. 
+ :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and language on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like `{"id": "1", "language": "en", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.TextDocumentInput] :keyword str language: The 2 letter ISO 639-1 representation of language for the entire batch. For example, use "en" for English; "es" for Spanish etc. @@ -271,7 +271,7 @@ async def recognize_pii_entities( # type: ignore """ language_arg = kwargs.pop("language", None) language = language_arg if language_arg is not None else self._default_language - docs = _validate_batch_input(inputs, "language", language) + docs = _validate_batch_input(documents, "language", language) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: @@ -288,7 +288,7 @@ async def recognize_pii_entities( # type: ignore @distributed_trace_async async def recognize_linked_entities( # type: ignore self, - inputs: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]], + documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]], **kwargs: Any ) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]]: """Recognize linked entities from a well-known knowledge base for a batch of documents. @@ -301,12 +301,12 @@ async def recognize_linked_entities( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and language on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like `{"id": "1", "language": "en", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.TextDocumentInput] :keyword str language: The 2 letter ISO 639-1 representation of language for the entire batch. For example, use "en" for English; "es" for Spanish etc. @@ -335,7 +335,7 @@ async def recognize_linked_entities( # type: ignore """ language_arg = kwargs.pop("language", None) language = language_arg if language_arg is not None else self._default_language - docs = _validate_batch_input(inputs, "language", language) + docs = _validate_batch_input(documents, "language", language) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: @@ -352,7 +352,7 @@ async def recognize_linked_entities( # type: ignore @distributed_trace_async async def extract_key_phrases( # type: ignore self, - inputs: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]], + documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]], **kwargs: Any ) -> List[Union[ExtractKeyPhrasesResult, DocumentError]]: """Extract Key Phrases from a batch of documents. @@ -365,12 +365,12 @@ async def extract_key_phrases( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. 
- :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and language on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like `{"id": "1", "language": "en", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.TextDocumentInput] :keyword str language: The 2 letter ISO 639-1 representation of language for the entire batch. For example, use "en" for English; "es" for Spanish etc. @@ -399,7 +399,7 @@ async def extract_key_phrases( # type: ignore """ language_arg = kwargs.pop("language", None) language = language_arg if language_arg is not None else self._default_language - docs = _validate_batch_input(inputs, "language", language) + docs = _validate_batch_input(documents, "language", language) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: @@ -416,7 +416,7 @@ async def extract_key_phrases( # type: ignore @distributed_trace_async async def analyze_sentiment( # type: ignore self, - inputs: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]], + documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]], **kwargs: Any ) -> List[Union[AnalyzeSentimentResult, DocumentError]]: """Analyze sentiment for a batch of documents. @@ -428,12 +428,12 @@ async def analyze_sentiment( # type: ignore See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits for document length limits, maximum batch size, and supported text encoding. - :param inputs: The set of documents to process as part of this batch. + :param documents: The set of documents to process as part of this batch. If you wish to specify the ID and language on a per-item basis you must use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like `{"id": "1", "language": "en", "text": "hello world"}`. - :type inputs: + :type documents: list[str] or list[~azure.ai.textanalytics.TextDocumentInput] :keyword str language: The 2 letter ISO 639-1 representation of language for the entire batch. For example, use "en" for English; "es" for Spanish etc. 
@@ -462,7 +462,7 @@ async def analyze_sentiment( # type: ignore """ language_arg = kwargs.pop("language", None) language = language_arg if language_arg is not None else self._default_language - docs = _validate_batch_input(inputs, "language", language) + docs = _validate_batch_input(documents, "language", language) model_version = kwargs.pop("model_version", None) show_stats = kwargs.pop("show_stats", False) try: diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py index 21f524ff68a01..bdf22054907ae 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py @@ -127,7 +127,7 @@ def test_bad_model_version(self, resource_group, location, text_analytics_accoun text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = text_analytics.analyze_sentiment( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -496,7 +496,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = text_analytics.analyze_sentiment( - inputs=["Bill Gates is the CEO of Microsoft."], + documents=["Bill Gates is the CEO of Microsoft."], model_version="latest", show_stats=True, language="es", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py index f2f16bf524766..ce54631f723e2 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py @@ -151,7 +151,7 @@ async def test_bad_model_version(self, resource_group, location, text_analytics_ text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = await text_analytics.analyze_sentiment( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -544,7 +544,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = await text_analytics.analyze_sentiment( - inputs=["Bill Gates is the CEO of Microsoft."], + documents=["Bill Gates is the CEO of Microsoft."], model_version="latest", show_stats=True, language="es", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language.py index 6c5f4d24ef548..1799d02dc5d47 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language.py @@ -149,7 +149,7 @@ def test_bad_model_version(self, resource_group, location, text_analytics_accoun text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = text_analytics.detect_language( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -533,10 +533,10 @@ def callback(response): # test DetectLanguageInput result2 = text_analytics.detect_language(documents2, 
raw_response_hook=callback) # test per-operation - result3 = text_analytics.detect_language(inputs=["this is written in english"], country_hint="none", raw_response_hook=callback) + result3 = text_analytics.detect_language(documents=["this is written in english"], country_hint="none", raw_response_hook=callback) # test client default new_client = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key), default_country_hint="none") - result4 = new_client.detect_language(inputs=["this is written in english"], raw_response_hook=callback) + result4 = new_client.detect_language(documents=["this is written in english"], raw_response_hook=callback) @GlobalTextAnalyticsAccountPreparer() def test_country_hint_kwarg(self, resource_group, location, text_analytics_account, text_analytics_account_key): @@ -549,7 +549,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = text_analytics.detect_language( - inputs=["this is written in english"], + documents=["this is written in english"], model_version="latest", show_stats=True, country_hint="ES", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py index aa36ff5a1ae8a..e7c3c96bf2e3f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py @@ -172,7 +172,7 @@ async def test_bad_model_version(self, resource_group, location, text_analytics_ text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = await text_analytics.detect_language( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -193,7 +193,7 @@ async def test_mixing_inputs(self, resource_group, location, text_analytics_acco docs = [ {"id": "1", "text": "Microsoft was founded by Bill Gates and Paul Allen."}, TextDocumentInput(id="2", text="I did not like the hotel we stayed at. 
It was too expensive."), - u"You cannot mix string input with the above inputs" + u"You cannot mix string input with the above documents" ] with self.assertRaises(TypeError): response = await text_analytics.detect_language(docs) @@ -579,10 +579,10 @@ def callback(response): # test DetectLanguageInput result2 = await text_analytics.detect_language(documents2, raw_response_hook=callback) # test per-operation - result3 = await text_analytics.detect_language(inputs=["this is written in english"], country_hint="none", raw_response_hook=callback) + result3 = await text_analytics.detect_language(documents=["this is written in english"], country_hint="none", raw_response_hook=callback) # test client default new_client = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key), default_country_hint="none") - result4 = await new_client.detect_language(inputs=["this is written in english"], raw_response_hook=callback) + result4 = await new_client.detect_language(documents=["this is written in english"], raw_response_hook=callback) @GlobalTextAnalyticsAccountPreparer() @AsyncTextAnalyticsTest.await_prepared_test @@ -596,7 +596,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = await text_analytics.detect_language( - inputs=["this is written in english"], + documents=["this is written in english"], model_version="latest", show_stats=True, country_hint="ES", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases.py index 558ff271edd28..4d1df8d562199 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases.py @@ -112,7 +112,7 @@ def test_bad_model_version(self, resource_group, location, text_analytics_accoun text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = text_analytics.extract_key_phrases( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -465,7 +465,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = text_analytics.extract_key_phrases( - inputs=["Bill Gates is the CEO of Microsoft."], + documents=["Bill Gates is the CEO of Microsoft."], model_version="latest", show_stats=True, language="es", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases_async.py index e05ea66b0c74a..a8813e8619cca 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases_async.py @@ -136,7 +136,7 @@ async def test_bad_model_version(self, resource_group, location, text_analytics_ text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = await text_analytics.extract_key_phrases( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -513,7 +513,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = await text_analytics.extract_key_phrases( - inputs=["Bill Gates is the CEO of Microsoft."], + 
documents=["Bill Gates is the CEO of Microsoft."], model_version="latest", show_stats=True, language="es", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities.py index 878b564bbdf66..07745bd47a1fb 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities.py @@ -127,7 +127,7 @@ def test_bad_model_version(self, resource_group, location, text_analytics_accoun text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = text_analytics.recognize_entities( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -480,7 +480,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = text_analytics.recognize_entities( - inputs=["Bill Gates is the CEO of Microsoft."], + documents=["Bill Gates is the CEO of Microsoft."], model_version="latest", show_stats=True, language="es", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities_async.py index 3eebafdd3b265..6754a26ee43c0 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities_async.py @@ -151,7 +151,7 @@ async def test_bad_model_version(self, resource_group, location, text_analytics_ text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = await text_analytics.recognize_entities( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -525,7 +525,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = await text_analytics.recognize_entities( - inputs=["Bill Gates is the CEO of Microsoft."], + documents=["Bill Gates is the CEO of Microsoft."], model_version="latest", show_stats=True, language="es", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities.py index 61826105d5936..68f79f8dfbd4f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities.py @@ -123,7 +123,7 @@ def test_bad_model_version(self, resource_group, location, text_analytics_accoun text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = text_analytics.recognize_linked_entities( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -475,7 +475,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = text_analytics.recognize_linked_entities( - inputs=["Bill Gates is the CEO of Microsoft."], + documents=["Bill Gates is the CEO of Microsoft."], model_version="latest", show_stats=True, language="es", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py 
b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py index 0a7b39ae52c68..904f3080a2ace 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py @@ -147,7 +147,7 @@ async def test_bad_model_version(self, resource_group, location, text_analytics_ text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = await text_analytics.recognize_linked_entities( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -521,7 +521,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = await text_analytics.recognize_linked_entities( - inputs=["Bill Gates is the CEO of Microsoft."], + documents=["Bill Gates is the CEO of Microsoft."], model_version="latest", show_stats=True, language="es", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities.py index 97e5e7a987a75..7a2119e9c0eeb 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities.py @@ -150,7 +150,7 @@ def test_bad_model_version(self, resource_group, location, text_analytics_accoun text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = text_analytics.recognize_pii_entities( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -503,7 +503,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = text_analytics.recognize_pii_entities( - inputs=["Bill Gates is the CEO of Microsoft."], + documents=["Bill Gates is the CEO of Microsoft."], model_version="latest", show_stats=True, language="en", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities_async.py index 4255e895c722c..661ffefa2e33f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_pii_entities_async.py @@ -175,7 +175,7 @@ async def test_bad_model_version(self, resource_group, location, text_analytics_ text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) with self.assertRaises(HttpResponseError): response = await text_analytics.recognize_pii_entities( - inputs=["Microsoft was founded by Bill Gates."], + documents=["Microsoft was founded by Bill Gates."], model_version="old" ) @@ -549,7 +549,7 @@ def callback(response): self.assertIsNotNone(response.statistics) res = await text_analytics.recognize_pii_entities( - inputs=["Bill Gates is the CEO of Microsoft."], + documents=["Bill Gates is the CEO of Microsoft."], model_version="latest", show_stats=True, language="en", diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_text_analytics.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_text_analytics.py index 0b229ecabcd47..0f30aca4c998a 100644 --- 
a/sdk/textanalytics/azure-ai-textanalytics/tests/test_text_analytics.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_text_analytics.py @@ -18,7 +18,7 @@ def test_detect_language(self, resource_group, location, text_analytics_account, text_analytics = TextAnalyticsClient(text_analytics_account, TextAnalyticsApiKeyCredential(text_analytics_account_key)) response = text_analytics.detect_language( - inputs=[{ + documents=[{ 'id': 1, 'text': 'I had a wonderful experience! The rooms were wonderful and the staff was helpful.' }]
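
---

Taken together, this patch renames the first parameter of every batched operation from `inputs` to `documents` on both the synchronous and asynchronous clients. A minimal sketch of the call sites after the rename, assuming a valid endpoint and API key (`"<endpoint>"` and `"<api-key>"` are placeholders; the client construction and method keywords mirror the tests and docstrings above):

```python
from azure.ai.textanalytics import (
    TextAnalyticsClient,
    TextAnalyticsApiKeyCredential,
)

client = TextAnalyticsClient(
    "<endpoint>", TextAnalyticsApiKeyCredential("<api-key>")
)

# Before this patch: client.analyze_sentiment(inputs=[...]).
# After it, the keyword is `documents`; plain strings are accepted...
results = client.analyze_sentiment(
    documents=["I hated the movie. It was so slow!", "What a great movie!"]
)

# ...as are dict (or TextDocumentInput) representations that carry a
# per-item id and language.
results = client.recognize_entities(
    documents=[
        {"id": "1", "language": "en",
         "text": "Microsoft was founded by Bill Gates."},
    ]
)

# detect_language keeps its per-call country_hint keyword; only the
# documents parameter name changed.
results = client.detect_language(
    documents=["this is written in english"], country_hint="none"
)

for result in results:
    if not result.is_error:
        print(result)
```

The async client picks up the same rename. A sketch under the same placeholder assumptions:

```python
import asyncio

from azure.ai.textanalytics import TextAnalyticsApiKeyCredential
from azure.ai.textanalytics.aio import TextAnalyticsClient


async def main():
    client = TextAnalyticsClient(
        "<endpoint>", TextAnalyticsApiKeyCredential("<api-key>")
    )
    # Same keyword as the sync client after this patch.
    results = await client.extract_key_phrases(
        documents=["Bill Gates is the CEO of Microsoft."]
    )
    for result in results:
        if not result.is_error:
            print(result.key_phrases)


asyncio.run(main())
```

Note that because the parameter is positional-or-keyword, positional call sites such as `text_analytics.detect_language(docs)` in the tests above are unaffected; only keyword call sites using `inputs=` had to change, which is why the CHANGELOG lists this as a breaking change.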