[text analytics] Input parameter rename from inputs to documents (#10300)
iscai-msft authored Mar 16, 2020
1 parent c6be5cd commit 6317e09
Showing 17 changed files with 83 additions and 80 deletions.
3 changes: 3 additions & 0 deletions sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md
@@ -5,6 +5,9 @@
- `score` attribute has been renamed to `confidence_score` for the `CategorizedEntity`, `LinkedEntityMatch`, and
`PiiEntity` models

+**Breaking changes**
+- All input parameters `inputs` have been renamed to `documents`


## 1.0.0b3 (2020-03-10)

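The rename is call-compatible for positional use; only keyword callers need to change. A minimal before/after sketch, assuming an already-constructed `TextAnalyticsClient` named `text_analytics_client`:

```python
# `text_analytics_client` is an assumed, already-constructed TextAnalyticsClient instance.
documents = ["I had the best day of my life.", "This was a waste of my time."]

# 1.0.0b3 and earlier accepted the batch through the `inputs` keyword:
# results = text_analytics_client.analyze_sentiment(inputs=documents)

# From this release on, the keyword is `documents`:
results = text_analytics_client.analyze_sentiment(documents=documents)

# Positional callers are unaffected:
results = text_analytics_client.analyze_sentiment(documents)
```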
4 changes: 2 additions & 2 deletions sdk/textanalytics/azure-ai-textanalytics/README.md
@@ -138,7 +138,7 @@ The input for each operation is passed as a **list** of documents.

Each document can be passed as a string in the list, e.g.
```python
-docs = ["I hated the movie. It was so slow!", "The movie made it into my top ten favorites.", "What a great movie!"]
+documents = ["I hated the movie. It was so slow!", "The movie made it into my top ten favorites.", "What a great movie!"]
```

or, if you wish to pass in a per-item document `id` or `language`/`country_hint`, they can be passed as a list of
@@ -147,7 +147,7 @@ or, if you wish to pass in a per-item document `id` or `language`/`country_hint`
or a dict-like representation of the object:

```python
-inputs = [
+documents = [
{"id": "1", "language": "en", "text": "I hated the movie. It was so slow!"},
{"id": "2", "language": "en", "text": "The movie made it into my top ten favorites."},
{"id": "3", "language": "en", "text": "What a great movie!"}
]
```
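As an illustrative sketch (not taken from the README itself), the same batch can also be built with the typed `TextDocumentInput` model referenced in the client docstrings below; `text_analytics_client` is assumed to be an already-constructed `TextAnalyticsClient`:

```python
from azure.ai.textanalytics import TextDocumentInput

# `text_analytics_client` is an assumed, already-constructed TextAnalyticsClient instance.
documents = [
    TextDocumentInput(id="1", language="en", text="I hated the movie. It was so slow!"),
    TextDocumentInput(id="2", language="en", text="The movie made it into my top ten favorites."),
    TextDocumentInput(id="3", language="en", text="What a great movie!"),
]

# Every batched operation now takes this list through the renamed `documents` parameter.
results = text_analytics_client.analyze_sentiment(documents)
```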
@@ -95,7 +95,7 @@ def __init__(self, endpoint, credential, **kwargs):
@distributed_trace
def detect_language( # type: ignore
self,
-inputs, # type: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]]
+documents, # type: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]]
**kwargs # type: Any
):
# type: (...) -> List[Union[DetectLanguageResult, DocumentError]]
@@ -108,12 +108,12 @@ def detect_language( # type: ignore
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
-:param inputs: The set of documents to process as part of this batch.
+:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and country_hint on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.DetectLanguageInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.DetectLanguageInput`, like
`{"id": "1", "country_hint": "us", "text": "hello world"}`.
-:type inputs:
+:type documents:
list[str] or list[~azure.ai.textanalytics.DetectLanguageInput]
:keyword str country_hint: A country hint for the entire batch. Accepts two
letter country codes specified by ISO 3166-1 alpha-2. Per-document
@@ -142,7 +142,7 @@ def detect_language( # type: ignore
"""
country_hint_arg = kwargs.pop("country_hint", None)
country_hint = country_hint_arg if country_hint_arg is not None else self._default_country_hint
-docs = _validate_batch_input(inputs, "country_hint", country_hint)
+docs = _validate_batch_input(documents, "country_hint", country_hint)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
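An illustrative call with the renamed parameter and the `country_hint` keyword documented above (`text_analytics_client` is assumed to exist, and `primary_language` is assumed from this version's `DetectLanguageResult` model):

```python
# `text_analytics_client` is an assumed, already-constructed TextAnalyticsClient instance.
documents = [
    "This document is written in English.",
    "Este documento está escrito en español.",
]

results = text_analytics_client.detect_language(documents, country_hint="US")
for result in results:
    if not result.is_error:
        # `primary_language` is assumed from the DetectLanguageResult model in this release
        print(result.id, result.primary_language.name)
```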
@@ -159,7 +159,7 @@ def detect_language( # type: ignore
@distributed_trace
def recognize_entities( # type: ignore
self,
-inputs, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
+documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
**kwargs # type: Any
):
# type: (...) -> List[Union[RecognizeEntitiesResult, DocumentError]]
@@ -172,12 +172,12 @@ def recognize_entities( # type: ignore
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
-:param inputs: The set of documents to process as part of this batch.
+:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list
of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`,
like `{"id": "1", "language": "en", "text": "hello world"}`.
-:type inputs:
+:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
@@ -206,7 +206,7 @@ def recognize_entities( # type: ignore
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
-docs = _validate_batch_input(inputs, "language", language)
+docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
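A usage sketch for `recognize_entities` with the renamed parameter (`text_analytics_client` assumed; `confidence_score` follows the rename noted in the CHANGELOG above):

```python
# `text_analytics_client` is an assumed, already-constructed TextAnalyticsClient instance.
documents = ["Microsoft was founded by Bill Gates and Paul Allen."]

results = text_analytics_client.recognize_entities(documents, language="en")
for result in results:
    if not result.is_error:
        for entity in result.entities:
            # `confidence_score` replaces the former `score` attribute on CategorizedEntity
            print(entity.text, entity.confidence_score)
```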
@@ -223,7 +223,7 @@ def recognize_entities( # type: ignore
@distributed_trace
def recognize_pii_entities( # type: ignore
self,
-inputs, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
+documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
**kwargs # type: Any
):
# type: (...) -> List[Union[RecognizePiiEntitiesResult, DocumentError]]
@@ -236,12 +236,12 @@ def recognize_pii_entities( # type: ignore
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
-:param inputs: The set of documents to process as part of this batch.
+:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
-:type inputs:
+:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
@@ -270,7 +270,7 @@ def recognize_pii_entities( # type: ignore
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
-docs = _validate_batch_input(inputs, "language", language)
+docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
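Similarly for `recognize_pii_entities` (illustrative only; `text_analytics_client` and the sample text are assumptions):

```python
# `text_analytics_client` is an assumed, already-constructed TextAnalyticsClient instance.
documents = ["The employee's SSN is 555-55-5555."]

results = text_analytics_client.recognize_pii_entities(documents, language="en")
for result in results:
    if not result.is_error:
        for entity in result.entities:
            # PiiEntity also carries `confidence_score` after the rename in the CHANGELOG
            print(entity.text, entity.confidence_score)
```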
@@ -287,7 +287,7 @@ def recognize_pii_entities( # type: ignore
@distributed_trace
def recognize_linked_entities( # type: ignore
self,
-inputs, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
+documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
**kwargs # type: Any
):
# type: (...) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]]
@@ -301,12 +301,12 @@ def recognize_linked_entities( # type: ignore
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
-:param inputs: The set of documents to process as part of this batch.
+:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
-:type inputs:
+:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
@@ -335,7 +335,7 @@ def recognize_linked_entities( # type: ignore
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
-docs = _validate_batch_input(inputs, "language", language)
+docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
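A sketch for `recognize_linked_entities` (`text_analytics_client` assumed; the `name`/`url` attributes are assumed from the `LinkedEntity` model):

```python
# `text_analytics_client` is an assumed, already-constructed TextAnalyticsClient instance.
documents = ["Easter Island, a Chilean territory, is a remote volcanic island in Polynesia."]

results = text_analytics_client.recognize_linked_entities(documents, language="en")
for result in results:
    if not result.is_error:
        for entity in result.entities:
            # `name`/`url` are assumed attribute names pointing at the linked knowledge-base entry
            print(entity.name, entity.url)
```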
@@ -352,7 +352,7 @@ def recognize_linked_entities( # type: ignore
@distributed_trace
def extract_key_phrases( # type: ignore
self,
-inputs, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
+documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
**kwargs # type: Any
):
# type: (...) -> List[Union[ExtractKeyPhrasesResult, DocumentError]]
@@ -366,12 +366,12 @@ def extract_key_phrases( # type: ignore
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
-:param inputs: The set of documents to process as part of this batch.
+:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
-:type inputs:
+:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
@@ -400,7 +400,7 @@ def extract_key_phrases( # type: ignore
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
-docs = _validate_batch_input(inputs, "language", language)
+docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
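A sketch for `extract_key_phrases` with the renamed parameter (`text_analytics_client` assumed):

```python
# `text_analytics_client` is an assumed, already-constructed TextAnalyticsClient instance.
documents = ["My cat might need to see a veterinarian."]

results = text_analytics_client.extract_key_phrases(documents, language="en")
for result in results:
    if not result.is_error:
        print(result.id, result.key_phrases)
```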
@@ -417,7 +417,7 @@ def extract_key_phrases( # type: ignore
@distributed_trace
def analyze_sentiment( # type: ignore
self,
-inputs, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
+documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
**kwargs # type: Any
):
# type: (...) -> List[Union[AnalyzeSentimentResult, DocumentError]]
@@ -430,12 +430,12 @@ def analyze_sentiment( # type: ignore
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
-:param inputs: The set of documents to process as part of this batch.
+:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
-:type inputs:
+:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
@@ -464,7 +464,7 @@ def analyze_sentiment( # type: ignore
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
-docs = _validate_batch_input(inputs, "language", language)
+docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
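And a sketch for `analyze_sentiment`, including the `show_stats` keyword popped in the body above (`text_analytics_client` assumed):

```python
# `text_analytics_client` is an assumed, already-constructed TextAnalyticsClient instance.
documents = [
    "The food was delicious and the staff were wonderful.",
    "The concierge was unhelpful.",
]

results = text_analytics_client.analyze_sentiment(documents, language="en", show_stats=True)
for result in results:
    if not result.is_error:
        # `sentiment` is the document-level label, e.g. "positive" or "negative"
        print(result.id, result.sentiment)
```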