[AutoPR cognitiveservices/data-plane/ComputerVision] Update CognitiveService ComputerVision API to V2 (#2747)

* Generated from c2567da015994dac3301f495b6c5011d28ceaffe

Add post-processing directive to swap argument order for RecognizeText to position the url argument ahead of the mode argument.

autorest does not generate the intended order, so we add a post-processing directive for each target language (a usage sketch of the resulting call order follows below, after the file summary).

* Generated from 973322cfd6ec2ced60a732f53b5318ceded5de7b

Fix validation errors

* Generated from 70efe042d607cbd973734e432da395ed35191a03

Add Spanish support for ServiceLanguage

This is for parity with V1.
AutorestCI committed Jun 21, 2018
1 parent 49ab98b commit bee4351
Showing 14 changed files with 56 additions and 58 deletions.
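The central change is the Recognize Text surface: recognize_text now takes a required mode (the new TextRecognitionMode enum) instead of the boolean detect_handwriting, with url kept ahead of mode. Below is a minimal end-to-end sketch of the V2 flow; the region, subscription key, terminal status strings, and the way the operation id is pulled out of the Operation-Location header are illustrative assumptions, while the method names and signatures come from the diff that follows.

```python
import time

from azure.cognitiveservices.vision.computervision import ComputerVisionAPI
from azure.cognitiveservices.vision.computervision.models import TextRecognitionMode
from msrest.authentication import CognitiveServicesCredentials

# Placeholder region and key -- substitute your own values.
client = ComputerVisionAPI(
    azure_region="westus",
    credentials=CognitiveServicesCredentials("<subscription key>"),
)

# url is passed ahead of mode, which is the order the post-processing directive enforces.
raw = client.recognize_text(
    "https://example.com/sign.jpg",   # publicly reachable image URL
    TextRecognitionMode.printed,      # or TextRecognitionMode.handwritten
    raw=True,                         # raw response exposes the Operation-Location header
)

# Assumption: the operation id is the last path segment of Operation-Location.
operation_id = raw.response.headers["Operation-Location"].rsplit("/", 1)[-1]

result = client.get_text_operation_result(operation_id)
while result.status not in ("Succeeded", "Failed"):   # terminal status strings assumed
    time.sleep(1)
    result = client.get_text_operation_result(operation_id)
```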
@@ -41,7 +41,7 @@ def __init__(
raise ValueError("Parameter 'azure_region' must not be None.")
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
base_url = 'https://{AzureRegion}.api.cognitive.microsoft.com/vision/v1.0'
base_url = 'https://{AzureRegion}.api.cognitive.microsoft.com/vision/v2.0'

super(ComputerVisionAPIConfiguration, self).__init__(base_url)

@@ -76,7 +76,7 @@ def __init__(
super(ComputerVisionAPI, self).__init__(self.config.credentials, self.config)

client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '1.0'
self.api_version = '2.0'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)

@@ -625,18 +625,18 @@ def analyze_image_by_domain(
analyze_image_by_domain.metadata = {'url': '/models/{model}/analyze'}

def recognize_text(
self, url, detect_handwriting=False, custom_headers=None, raw=False, **operation_config):
self, url, mode, custom_headers=None, raw=False, **operation_config):
"""Recognize Text operation. When you use the Recognize Text interface,
the response contains a field called 'Operation-Location'. The
'Operation-Location' field contains the URL that you must use for your
Get Handwritten Text Operation Result operation.
Get Recognize Text Operation Result operation.
:param mode: Type of text to recognize. Possible values include:
'Handwritten', 'Printed'
:type mode: str or
~azure.cognitiveservices.vision.computervision.models.TextRecognitionMode
:param url: Publicly reachable URL of an image
:type url: str
:param detect_handwriting: If 'true' is specified, handwriting
recognition is performed. If this parameter is set to 'false' or is
not specified, printed text recognition is performed.
:type detect_handwriting: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
@@ -658,8 +658,7 @@ def recognize_text(

# Construct parameters
query_parameters = {}
if detect_handwriting is not None:
query_parameters['detectHandwriting'] = self._serialize.query("detect_handwriting", detect_handwriting, 'bool')
query_parameters['mode'] = self._serialize.query("mode", mode, 'TextRecognitionMode')

# Construct headers
header_parameters = {}
@@ -693,7 +692,7 @@ def get_text_operation_result(
returned from Recognize Text interface.
:param operation_id: Id of the text operation returned in the response
of the 'Recognize Handwritten Text'
of the 'Recognize Text'
:type operation_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
@@ -1240,18 +1239,18 @@ def analyze_image_by_domain_in_stream(
analyze_image_by_domain_in_stream.metadata = {'url': '/models/{model}/analyze'}

def recognize_text_in_stream(
self, image, detect_handwriting=False, custom_headers=None, raw=False, callback=None, **operation_config):
self, image, mode, custom_headers=None, raw=False, callback=None, **operation_config):
"""Recognize Text operation. When you use the Recognize Text interface,
the response contains a field called 'Operation-Location'. The
'Operation-Location' field contains the URL that you must use for your
Get Handwritten Text Operation Result operation.
Get Recognize Text Operation Result operation.
:param image: An image stream.
:type image: Generator
:param detect_handwriting: If 'true' is specified, handwriting
recognition is performed. If this parameter is set to 'false' or is
not specified, printed text recognition is performed.
:type detect_handwriting: bool
:param mode: Type of text to recognize. Possible values include:
'Handwritten', 'Printed'
:type mode: str or
~azure.cognitiveservices.vision.computervision.models.TextRecognitionMode
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
@@ -1276,8 +1275,7 @@ def recognize_text_in_stream(

# Construct parameters
query_parameters = {}
if detect_handwriting is not None:
query_parameters['detectHandwriting'] = self._serialize.query("detect_handwriting", detect_handwriting, 'bool')
query_parameters['mode'] = self._serialize.query("mode", mode, 'TextRecognitionMode')

# Construct headers
header_parameters = {}
@@ -16,16 +16,17 @@
from .text_operation_result_py3 import TextOperationResult
from .face_rectangle_py3 import FaceRectangle
from .celebrities_model_py3 import CelebritiesModel
from .landmarks_model_py3 import LandmarksModel
from .category_detail_py3 import CategoryDetail
from .category_py3 import Category
from .adult_info_py3 import AdultInfo
from .color_info_py3 import ColorInfo
from .image_type_py3 import ImageType
from .image_tag_py3 import ImageTag
from .image_caption_py3 import ImageCaption
from .image_metadata_py3 import ImageMetadata
from .image_description_details_py3 import ImageDescriptionDetails
from .face_description_py3 import FaceDescription
from .image_metadata_py3 import ImageMetadata
from .image_analysis_py3 import ImageAnalysis
from .ocr_word_py3 import OcrWord
from .ocr_line_py3 import OcrLine
@@ -35,7 +36,6 @@
from .list_models_result_py3 import ListModelsResult
from .domain_model_results_py3 import DomainModelResults
from .celebrity_results_py3 import CelebrityResults
from .landmark_results_landmarks_item_py3 import LandmarkResultsLandmarksItem
from .landmark_results_py3 import LandmarkResults
from .image_description_py3 import ImageDescription
from .tag_result_py3 import TagResult
@@ -48,16 +48,17 @@
from .text_operation_result import TextOperationResult
from .face_rectangle import FaceRectangle
from .celebrities_model import CelebritiesModel
from .landmarks_model import LandmarksModel
from .category_detail import CategoryDetail
from .category import Category
from .adult_info import AdultInfo
from .color_info import ColorInfo
from .image_type import ImageType
from .image_tag import ImageTag
from .image_caption import ImageCaption
from .image_metadata import ImageMetadata
from .image_description_details import ImageDescriptionDetails
from .face_description import FaceDescription
from .image_metadata import ImageMetadata
from .image_analysis import ImageAnalysis
from .ocr_word import OcrWord
from .ocr_line import OcrLine
@@ -67,7 +68,6 @@
from .list_models_result import ListModelsResult
from .domain_model_results import DomainModelResults
from .celebrity_results import CelebrityResults
from .landmark_results_landmarks_item import LandmarkResultsLandmarksItem
from .landmark_results import LandmarkResults
from .image_description import ImageDescription
from .tag_result import TagResult
@@ -79,6 +79,7 @@
ComputerVisionErrorCodes,
VisualFeatureTypes,
OcrLanguages,
TextRecognitionMode,
AzureRegions,
Details,
)
@@ -90,16 +91,17 @@
'TextOperationResult',
'FaceRectangle',
'CelebritiesModel',
'LandmarksModel',
'CategoryDetail',
'Category',
'AdultInfo',
'ColorInfo',
'ImageType',
'ImageTag',
'ImageCaption',
'ImageMetadata',
'ImageDescriptionDetails',
'FaceDescription',
'ImageMetadata',
'ImageAnalysis',
'OcrWord',
'OcrLine',
@@ -109,7 +111,6 @@
'ListModelsResult',
'DomainModelResults',
'CelebrityResults',
'LandmarkResultsLandmarksItem',
'LandmarkResults',
'ImageDescription',
'TagResult',
@@ -120,6 +121,7 @@
'ComputerVisionErrorCodes',
'VisualFeatureTypes',
'OcrLanguages',
'TextRecognitionMode',
'AzureRegions',
'Details',
]
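The models namespace now exports LandmarksModel (replacing LandmarkResultsLandmarksItem) and the new TextRecognitionMode enum. A quick import smoke test against the regenerated package, with the package path taken from the docstrings in this diff, might look like:

```python
from azure.cognitiveservices.vision.computervision.models import (
    LandmarksModel,        # replaces LandmarkResultsLandmarksItem
    TextRecognitionMode,   # new in V2: 'Handwritten' / 'Printed'
)

print([m.value for m in TextRecognitionMode])   # ['Handwritten', 'Printed']
```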
@@ -18,12 +18,17 @@ class CategoryDetail(Model):
:param celebrities: An array of celebrities if any identified.
:type celebrities:
list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel]
:param landmarks: An array of landmarks if any identified.
:type landmarks:
list[~azure.cognitiveservices.vision.computervision.models.LandmarksModel]
"""

_attribute_map = {
'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'},
'landmarks': {'key': 'landmarks', 'type': '[LandmarksModel]'},
}

def __init__(self, **kwargs):
super(CategoryDetail, self).__init__(**kwargs)
self.celebrities = kwargs.get('celebrities', None)
self.landmarks = kwargs.get('landmarks', None)
@@ -18,12 +18,17 @@ class CategoryDetail(Model):
:param celebrities: An array of celebrities if any identified.
:type celebrities:
list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel]
:param landmarks: An array of landmarks if any identified.
:type landmarks:
list[~azure.cognitiveservices.vision.computervision.models.LandmarksModel]
"""

_attribute_map = {
'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'},
'landmarks': {'key': 'landmarks', 'type': '[LandmarksModel]'},
}

def __init__(self, *, celebrities=None, **kwargs) -> None:
def __init__(self, *, celebrities=None, landmarks=None, **kwargs) -> None:
super(CategoryDetail, self).__init__(**kwargs)
self.celebrities = celebrities
self.landmarks = landmarks
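CategoryDetail now carries landmarks alongside celebrities. A small sketch of building and reading the new field, using only the constructors shown in this diff:

```python
from azure.cognitiveservices.vision.computervision.models import (
    CategoryDetail,
    LandmarksModel,
)

# Sample values are made up for illustration.
detail = CategoryDetail(
    landmarks=[LandmarksModel(name="Eiffel Tower", confidence=0.97)],
)

for landmark in detail.landmarks or []:
    print(landmark.name, landmark.confidence)
```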
@@ -85,6 +85,12 @@ class OcrLanguages(str, Enum):
sk = "sk"


class TextRecognitionMode(str, Enum):

handwritten = "Handwritten"
printed = "Printed"


class AzureRegions(str, Enum):

westus = "westus"
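Because TextRecognitionMode subclasses str, the enum member and its literal string value are interchangeable when supplying the mode argument; a two-line illustration:

```python
from azure.cognitiveservices.vision.computervision.models import TextRecognitionMode

assert TextRecognitionMode.printed == "Printed"  # str-based enum compares equal to its value
mode = TextRecognitionMode.handwritten           # equivalent to passing the string "Handwritten"
```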
@@ -31,8 +31,8 @@ class ImageDescription(Model):
_attribute_map = {
'tags': {'key': 'description.tags', 'type': '[str]'},
'captions': {'key': 'description.captions', 'type': '[ImageCaption]'},
'request_id': {'key': 'description.requestId', 'type': 'str'},
'metadata': {'key': 'description.metadata', 'type': 'ImageMetadata'},
'request_id': {'key': 'requestId', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
}

def __init__(self, **kwargs):
@@ -21,23 +21,14 @@ class ImageDescriptionDetails(Model):
:param captions: A list of captions, sorted by confidence level.
:type captions:
list[~azure.cognitiveservices.vision.computervision.models.ImageCaption]
:param request_id: Id of the REST API request.
:type request_id: str
:param metadata:
:type metadata:
~azure.cognitiveservices.vision.computervision.models.ImageMetadata
"""

_attribute_map = {
'tags': {'key': 'tags', 'type': '[str]'},
'captions': {'key': 'captions', 'type': '[ImageCaption]'},
'request_id': {'key': 'requestId', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
}

def __init__(self, **kwargs):
super(ImageDescriptionDetails, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.captions = kwargs.get('captions', None)
self.request_id = kwargs.get('request_id', None)
self.metadata = kwargs.get('metadata', None)
@@ -21,23 +21,14 @@ class ImageDescriptionDetails(Model):
:param captions: A list of captions, sorted by confidence level.
:type captions:
list[~azure.cognitiveservices.vision.computervision.models.ImageCaption]
:param request_id: Id of the REST API request.
:type request_id: str
:param metadata:
:type metadata:
~azure.cognitiveservices.vision.computervision.models.ImageMetadata
"""

_attribute_map = {
'tags': {'key': 'tags', 'type': '[str]'},
'captions': {'key': 'captions', 'type': '[ImageCaption]'},
'request_id': {'key': 'requestId', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
}

def __init__(self, *, tags=None, captions=None, request_id: str=None, metadata=None, **kwargs) -> None:
def __init__(self, *, tags=None, captions=None, **kwargs) -> None:
super(ImageDescriptionDetails, self).__init__(**kwargs)
self.tags = tags
self.captions = captions
self.request_id = request_id
self.metadata = metadata
@@ -31,8 +31,8 @@ class ImageDescription(Model):
_attribute_map = {
'tags': {'key': 'description.tags', 'type': '[str]'},
'captions': {'key': 'description.captions', 'type': '[ImageCaption]'},
'request_id': {'key': 'description.requestId', 'type': 'str'},
'metadata': {'key': 'description.metadata', 'type': 'ImageMetadata'},
'request_id': {'key': 'requestId', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
}

def __init__(self, *, tags=None, captions=None, request_id: str=None, metadata=None, **kwargs) -> None:
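With request_id and metadata dropped from ImageDescriptionDetails and re-keyed on ImageDescription itself ('requestId' and 'metadata' instead of the 'description.'-prefixed keys), callers read them from the top-level description result. A minimal sketch; the ImageCaption keyword arguments are assumed from the wider models package rather than this diff:

```python
from azure.cognitiveservices.vision.computervision.models import (
    ImageCaption,
    ImageDescription,
)

description = ImageDescription(
    tags=["outdoor", "building"],
    captions=[ImageCaption(text="a large building", confidence=0.92)],  # assumed signature
    request_id="00000000-0000-0000-0000-000000000000",
)

# request_id now lives on ImageDescription, not on the nested details model.
print(description.captions[0].text, description.request_id)
```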
@@ -17,7 +17,7 @@ class LandmarkResults(Model):
:param landmarks:
:type landmarks:
list[~azure.cognitiveservices.vision.computervision.models.LandmarkResultsLandmarksItem]
list[~azure.cognitiveservices.vision.computervision.models.LandmarksModel]
:param request_id: Id of the REST API request.
:type request_id: str
:param metadata:
@@ -26,7 +26,7 @@ class LandmarkResults(Model):
"""

_attribute_map = {
'landmarks': {'key': 'landmarks', 'type': '[LandmarkResultsLandmarksItem]'},
'landmarks': {'key': 'landmarks', 'type': '[LandmarksModel]'},
'request_id': {'key': 'requestId', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
}
@@ -17,7 +17,7 @@ class LandmarkResults(Model):
:param landmarks:
:type landmarks:
list[~azure.cognitiveservices.vision.computervision.models.LandmarkResultsLandmarksItem]
list[~azure.cognitiveservices.vision.computervision.models.LandmarksModel]
:param request_id: Id of the REST API request.
:type request_id: str
:param metadata:
@@ -26,7 +26,7 @@ class LandmarkResults(Model):
"""

_attribute_map = {
'landmarks': {'key': 'landmarks', 'type': '[LandmarkResultsLandmarksItem]'},
'landmarks': {'key': 'landmarks', 'type': '[LandmarksModel]'},
'request_id': {'key': 'requestId', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
}
@@ -12,7 +12,7 @@
from msrest.serialization import Model


class LandmarkResultsLandmarksItem(Model):
class LandmarksModel(Model):
"""A landmark recognized in the image.
:param name: Name of the landmark.
@@ -27,6 +27,6 @@ class LandmarkResultsLandmarksItem(Model):
}

def __init__(self, **kwargs):
super(LandmarkResultsLandmarksItem, self).__init__(**kwargs)
super(LandmarksModel, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.confidence = kwargs.get('confidence', None)
@@ -12,7 +12,7 @@
from msrest.serialization import Model


class LandmarkResultsLandmarksItem(Model):
class LandmarksModel(Model):
"""A landmark recognized in the image.
:param name: Name of the landmark.
@@ -27,6 +27,6 @@ class LandmarkResultsLandmarksItem(Model):
}

def __init__(self, *, name: str=None, confidence: float=None, **kwargs) -> None:
super(LandmarkResultsLandmarksItem, self).__init__(**kwargs)
super(LandmarksModel, self).__init__(**kwargs)
self.name = name
self.confidence = confidence
@@ -9,5 +9,5 @@
# regenerated.
# --------------------------------------------------------------------------

VERSION = "1.0"
VERSION = "2.0"

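The bumped constant is what the installed package reports at runtime; the module path below is an assumption based on the usual layout of these generated SDKs rather than something shown in this diff:

```python
from azure.cognitiveservices.vision.computervision.version import VERSION  # assumed module path

assert VERSION == "2.0"
```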