From 7bcb0f97e76b5033a282d24613b3479a5f863fba Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Fri, 15 Sep 2017 14:32:03 -0700 Subject: [PATCH] Add video intelligence v1beta2. --- packages/video-intelligence/package.json | 2 +- .../v1beta1/video_intelligence.proto | 340 ++++++++++ .../v1beta2/video_intelligence.proto | 390 +++++++++++ packages/video-intelligence/src/index.js | 73 +- .../v1beta1/doc/doc_google_protobuf_any.js | 10 + .../src/v1beta1/doc/doc_google_rpc_status.js | 8 +- .../v1beta2/doc/doc_google_protobuf_any.js | 131 ++++ .../doc/doc_google_protobuf_duration.js | 97 +++ .../src/v1beta2/doc/doc_google_rpc_status.js | 92 +++ .../src/v1beta2/doc/doc_video_intelligence.js | 631 ++++++++++++++++++ .../video-intelligence/src/v1beta2/index.js | 34 + .../video_intelligence_service_client.js | 291 ++++++++ ...eo_intelligence_service_client_config.json | 31 + .../video-intelligence/test/gapic-v1beta2.js | 113 ++++ 14 files changed, 2225 insertions(+), 18 deletions(-) create mode 100644 packages/video-intelligence/proto/google/cloud/videointelligence/v1beta1/video_intelligence.proto create mode 100644 packages/video-intelligence/proto/google/cloud/videointelligence/v1beta2/video_intelligence.proto create mode 100644 packages/video-intelligence/src/v1beta2/doc/doc_google_protobuf_any.js create mode 100644 packages/video-intelligence/src/v1beta2/doc/doc_google_protobuf_duration.js create mode 100644 packages/video-intelligence/src/v1beta2/doc/doc_google_rpc_status.js create mode 100644 packages/video-intelligence/src/v1beta2/doc/doc_video_intelligence.js create mode 100644 packages/video-intelligence/src/v1beta2/index.js create mode 100644 packages/video-intelligence/src/v1beta2/video_intelligence_service_client.js create mode 100644 packages/video-intelligence/src/v1beta2/video_intelligence_service_client_config.json create mode 100644 packages/video-intelligence/test/gapic-v1beta2.js diff --git a/packages/video-intelligence/package.json 
b/packages/video-intelligence/package.json index 95481fd281fc..f56956f02da4 100644 --- a/packages/video-intelligence/package.json +++ b/packages/video-intelligence/package.json @@ -25,7 +25,7 @@ ], "dependencies": { "extend": "^3.0", - "google-gax": "^0.13.2", + "google-gax": "^0.13.5", "google-proto-files": "^0.12.0" }, "devDependencies": { diff --git a/packages/video-intelligence/proto/google/cloud/videointelligence/v1beta1/video_intelligence.proto b/packages/video-intelligence/proto/google/cloud/videointelligence/v1beta1/video_intelligence.proto new file mode 100644 index 000000000000..0b9703462d70 --- /dev/null +++ b/packages/video-intelligence/proto/google/cloud/videointelligence/v1beta1/video_intelligence.proto @@ -0,0 +1,340 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.videointelligence.v1beta1; + +import "google/api/annotations.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1Beta1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1beta1"; + + +// Service that implements Google Cloud Video Intelligence API. +service VideoIntelligenceService { + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { post: "/v1beta1/videos:annotate" body: "*" }; + } +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + string input_uri = 1; + + // The video data bytes. Encoding: base64. 
If unset, the input video(s) + // should be specified via `input_uri`. If set, `input_uri` should be unset. + string input_content = 6; + + // Requested video annotation features. + repeated Feature features = 2; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + string output_uri = 4; + + // Optional cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. + string location_id = 5; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video + // is treated as a single segment. + repeated VideoSegment segments = 1; + + // If label detection has been requested, what labels should be detected + // in addition to video-level labels or segment-level labels. If unspecified, + // defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 2; + + // Whether the video has been shot from a stationary (i.e. non-moving) camera. + // When set to true, might improve detection accuracy for moving objects. + bool stationary_camera = 3; + + // Model to use for label detection. + // Supported values: "latest" and "stable" (the default). + string label_detection_model = 4; + + // Model to use for face detection. 
+ // Supported values: "latest" and "stable" (the default). + string face_detection_model = 5; + + // Model to use for shot change detection. + // Supported values: "latest" and "stable" (the default). + string shot_change_detection_model = 6; + + // Model to use for safe search detection. + // Supported values: "latest" and "stable" (the default). + string safe_search_detection_model = 7; +} + +// Video segment. +message VideoSegment { + // Start offset in microseconds (inclusive). Unset means 0. + int64 start_time_offset = 1; + + // End offset in microseconds (inclusive). Unset means 0. + int64 end_time_offset = 2; +} + +// Label location. +message LabelLocation { + // Video segment. Set to [-1, -1] for video-level labels. + // Set to [timestamp, timestamp] for frame-level labels. + // Otherwise, corresponds to one of `AnnotateSpec.segments` + // (if specified) or to shot boundaries (if requested). + VideoSegment segment = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; + + // Label level. + LabelLevel level = 3; +} + +// Label annotation. +message LabelAnnotation { + // Textual description, e.g. `Fixed-gear bicycle`. + string description = 1; + + // Language code for `description` in BCP-47 format. + string language_code = 2; + + // Where the label was detected and with what confidence. + repeated LabelLocation locations = 3; +} + +// Safe search annotation (based on per-frame visual signals only). +// If no unsafe content has been detected in a frame, no annotations +// are present for that frame. If only some types of unsafe content +// have been detected in a frame, the likelihood is set to `UNKNOWN` +// for all other types of unsafe content. +message SafeSearchAnnotation { + // Likelihood of adult content. + Likelihood adult = 1; + + // Likelihood that an obvious modification was made to the original + // version to make it appear funny or offensive. + Likelihood spoof = 2; + + // Likelihood of medical content. 
+ Likelihood medical = 3; + + // Likelihood of violent content. + Likelihood violent = 4; + + // Likelihood of racy content. + Likelihood racy = 5; + + // Video time offset in microseconds. + int64 time_offset = 6; +} + +// Bounding box. +message BoundingBox { + // Left X coordinate. + int32 left = 1; + + // Right X coordinate. + int32 right = 2; + + // Bottom Y coordinate. + int32 bottom = 3; + + // Top Y coordinate. + int32 top = 4; +} + +// Face location. +message FaceLocation { + // Bounding box in a frame. + BoundingBox bounding_box = 1; + + // Video time offset in microseconds. + int64 time_offset = 2; +} + +// Face annotation. +message FaceAnnotation { + // Thumbnail of a representative face view (in JPEG format). Encoding: base64. + string thumbnail = 1; + + // All locations where a face was detected. + // Faces are detected and tracked on a per-video basis + // (as opposed to across multiple videos). + repeated VideoSegment segments = 2; + + // Face locations at one frame per second. + repeated FaceLocation locations = 3; +} + +// Annotation results for a single video. +message VideoAnnotationResults { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Label annotations. There is exactly one element for each unique label. + repeated LabelAnnotation label_annotations = 2; + + // Face annotations. There is exactly one element for each unique face. + repeated FaceAnnotation face_annotations = 3; + + // Shot annotations. Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 4; + + // Safe search annotations. + repeated SafeSearchAnnotation safe_search_annotations = 6; + + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. + google.rpc.Status error = 5; +} + +// Video annotation response. 
Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoResponse { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationResults annotation_results = 1; +} + +// Annotation progress for a single video. +message VideoAnnotationProgress { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Approximate percentage processed thus far. + // Guaranteed to be 100 when fully processed. + int32 progress_percent = 2; + + // Time when the request was received. + google.protobuf.Timestamp start_time = 3; + + // Time of the most recent update. + google.protobuf.Timestamp update_time = 4; +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoProgress { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationProgress annotation_progress = 1; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Human face detection and tracking. + FACE_DETECTION = 2; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 3; + + // Safe search detection. + SAFE_SEARCH_DETECTION = 4; +} + +// Label level (scope). +enum LabelLevel { + // Unspecified. + LABEL_LEVEL_UNSPECIFIED = 0; + + // Video-level. Corresponds to the whole video. + VIDEO_LEVEL = 1; + + // Segment-level. Corresponds to one of `AnnotateSpec.segments`. + SEGMENT_LEVEL = 2; + + // Shot-level. Corresponds to a single shot (i.e. a series of frames + // without a major camera position or background change). + SHOT_LEVEL = 3; + + // Frame-level. Corresponds to a single video frame. 
+ FRAME_LEVEL = 4; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unknown likelihood. + UNKNOWN = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} diff --git a/packages/video-intelligence/proto/google/cloud/videointelligence/v1beta2/video_intelligence.proto b/packages/video-intelligence/proto/google/cloud/videointelligence/v1beta2/video_intelligence.proto new file mode 100644 index 000000000000..b7ff5af6a2e6 --- /dev/null +++ b/packages/video-intelligence/proto/google/cloud/videointelligence/v1beta2/video_intelligence.proto @@ -0,0 +1,390 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.videointelligence.v1beta2; + +import "google/api/annotations.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1beta2"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1beta2"; + + +// Service that implements Google Cloud Video Intelligence API. +service VideoIntelligenceService { + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { post: "/v1beta2/videos:annotate" body: "*" }; + } +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. 
+ string input_uri = 1; + + // The video data bytes. Encoding: base64. If unset, the input video(s) + // should be specified via `input_uri`. If set, `input_uri` should be unset. + bytes input_content = 6; + + // Requested video annotation features. + repeated Feature features = 2; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + string output_uri = 4; + + // Optional cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. + string location_id = 5; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video + // is treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for FACE_DETECTION. + FaceDetectionConfig face_detection_config = 5; +} + +// Config for LABEL_DETECTION. 
+message LabelDetectionConfig { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. + // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 1; + + // Whether the video has been shot from a stationary (i.e. non-moving) camera. + // When set to true, might improve detection accuracy for moving objects. + // Should be used with `SHOT_AND_FRAME_MODE` enabled. + bool stationary_camera = 2; + + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 3; +} + +// Config for SHOT_CHANGE_DETECTION. +message ShotChangeDetectionConfig { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for EXPLICIT_CONTENT_DETECTION. +message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for FACE_DETECTION. +message FaceDetectionConfig { + // Model to use for face detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; + + // Whether bounding boxes be included in the face annotation output. + bool include_bounding_boxes = 2; +} + +// Video segment. +message VideoSegment { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). + google.protobuf.Duration start_time_offset = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the end of the segment (inclusive). + google.protobuf.Duration end_time_offset = 2; +} + +// Video segment level annotation results for label detection. +message LabelSegment { + // Video segment where a label was detected. 
+ VideoSegment segment = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Video frame level annotation results for label detection. +message LabelFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Detected entity from video analysis. +message Entity { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). + string entity_id = 1; + + // Textual description, e.g. `Fixed-gear bicycle`. + string description = 2; + + // Language code for `description` in BCP-47 format. + string language_code = 3; +} + +// Label annotation. +message LabelAnnotation { + // Detected entity. + Entity entity = 1; + + // Common categories for the detected entity. + // E.g. when the label is `Terrier` the category is likely `dog`. And in some + // cases there might be more than one categories e.g. `Terrier` could also be + // a `pet`. + repeated Entity category_entities = 2; + + // All video segments where a label was detected. + repeated LabelSegment segments = 3; + + // All video frames where a label was detected. + repeated LabelFrame frames = 4; +} + +// Video frame level annotation results for explicit content. +message ExplicitContentFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Likelihood of the pornography content.. + Likelihood pornography_likelihood = 2; +} + +// Explicit content annotation (based on per-frame visual signals only). +// If no explicit content has been detected in a frame, no annotations are +// present for that frame. 
+message ExplicitContentAnnotation { + // All video frames where explicit content was detected. + repeated ExplicitContentFrame frames = 1; +} + +// Normalized bounding box. +// The normalized vertex coordinates are relative to the original image. +// Range: [0, 1]. +message NormalizedBoundingBox { + // Left X coordinate. + float left = 1; + + // Top Y coordinate. + float top = 2; + + // Right X coordinate. + float right = 3; + + // Bottom Y coordinate. + float bottom = 4; +} + +// Video segment level annotation results for face detection. +message FaceSegment { + // Video segment where a face was detected. + VideoSegment segment = 1; +} + +// Video frame level annotation results for face detection. +message FaceFrame { + // Normalized Bounding boxes in a frame. + // There can be more than one boxes if the same face is detected in multiple + // locations within the current frame. + repeated NormalizedBoundingBox normalized_bounding_boxes = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the video frame for this location. + google.protobuf.Duration time_offset = 2; +} + +// Face annotation. +message FaceAnnotation { + // Thumbnail of a representative face view (in JPEG format). Encoding: base64. + bytes thumbnail = 1; + + // All video segments where a face was detected. + repeated FaceSegment segments = 2; + + // All video frames where a face was detected. + repeated FaceFrame frames = 3; +} + +// Annotation results for a single video. +message VideoAnnotationResults { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Label annotations on video level or user specified segment level. + // There is exactly one element for each unique label. + repeated LabelAnnotation segment_label_annotations = 2; + + // Label annotations on shot level. + // There is exactly one element for each unique label. 
+ repeated LabelAnnotation shot_label_annotations = 3; + + // Label annotations on frame level. + // There is exactly one element for each unique label. + repeated LabelAnnotation frame_label_annotations = 4; + + // Face annotations. There is exactly one element for each unique face. + repeated FaceAnnotation face_annotations = 5; + + // Shot annotations. Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 6; + + // Explicit content annotation. + ExplicitContentAnnotation explicit_annotation = 7; + + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. + google.rpc.Status error = 9; +} + +// Video annotation response. Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoResponse { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationResults annotation_results = 1; +} + +// Annotation progress for a single video. +message VideoAnnotationProgress { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Approximate percentage processed thus far. + // Guaranteed to be 100 when fully processed. + int32 progress_percent = 2; + + // Time when the request was received. + google.protobuf.Timestamp start_time = 3; + + // Time of the most recent update. + google.protobuf.Timestamp update_time = 4; +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoProgress { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationProgress annotation_progress = 1; +} + +// Video annotation feature. +enum Feature { + // Unspecified. 
+ FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // Human face detection and tracking. + FACE_DETECTION = 4; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} diff --git a/packages/video-intelligence/src/index.js b/packages/video-intelligence/src/index.js index 1a3984bbae5b..af00255f7358 100644 --- a/packages/video-intelligence/src/index.js +++ b/packages/video-intelligence/src/index.js @@ -13,27 +13,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/*! - * @module video-intelligence - * @name VideoIntelligence - */ - 'use strict'; var extend = require('extend'); var gapic = { - v1beta1: require('./v1beta1') + v1beta1: require('./v1beta1'), + v1beta2: require('./v1beta2'), }; var gaxGrpc = require('google-gax').grpc(); const VERSION = require('../package.json').version; + /** - * Create a videoIntelligenceServiceClient with additional helpers for common + * Create an videoIntelligenceServiceClient with additional helpers for common * tasks. * - * Use this service to interact with the Google Cloud Video Intelligence API. + * Service that implements Google Cloud Video Intelligence API. * * @param {object=} options - [Configuration object](#/docs). 
* @param {object=} options.credentials - Credentials object. @@ -57,7 +53,6 @@ const VERSION = require('../package.json').version; * of native Promises. * @param {string=} options.servicePath - The domain name of the * API remote host. - * @resource [Cloud Video Intelligence](https://cloud.google.com/video-intelligence) */ function videoIntelligenceV1beta1(options) { // Define the header options. @@ -78,7 +73,59 @@ extend(v1beta1Protos, gaxGrpc.load([{ file: 'google/cloud/videointelligence/v1beta1/video_intelligence.proto' }]).google.cloud.videointelligence.v1beta1); -module.exports = videoIntelligenceV1beta1; -module.exports.types = v1beta1Protos; + +/** + * Create an videoIntelligenceServiceClient with additional helpers for common + * tasks. + * + * Service that implements Google Cloud Video Intelligence API. + * + * @param {object=} options - [Configuration object](#/docs). + * @param {object=} options.credentials - Credentials object. + * @param {string=} options.credentials.client_email + * @param {string=} options.credentials.private_key + * @param {string=} options.email - Account email address. Required when using a + * .pem or .p12 keyFilename. + * @param {string=} options.keyFilename - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option above is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number=} options.port - The port on which to connect to + * the remote host. + * @param {string=} options.projectId - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. 
If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {function=} options.promise - Custom promise module to use instead + * of native Promises. + * @param {string=} options.servicePath - The domain name of the + * API remote host. + */ +function videoIntelligenceV1beta2(options) { + // Define the header options. + options = extend({}, options, { + libName: 'gccl', + libVersion: VERSION + }); + + // Create the client with the provided options. + var client = gapic.v1beta2(options).videoIntelligenceServiceClient(options); + return client; +} + +var v1beta2Protos = {}; + +extend(v1beta2Protos, gaxGrpc.load([{ + root: require('google-proto-files')('..'), + file: 'google/cloud/videointelligence/v1beta2/video_intelligence.proto' +}]).google.cloud.videointelligence.v1beta2); + + +module.exports = videoIntelligenceV1beta2; +module.exports.types = v1beta2Protos; module.exports.v1beta1 = videoIntelligenceV1beta1; -module.exports.v1beta1.types = v1beta1Protos; \ No newline at end of file +module.exports.v1beta1.types = v1beta1Protos; +module.exports.v1beta2 = videoIntelligenceV1beta2; +module.exports.v1beta2.types = v1beta2Protos; diff --git a/packages/video-intelligence/src/v1beta1/doc/doc_google_protobuf_any.js b/packages/video-intelligence/src/v1beta1/doc/doc_google_protobuf_any.js index 0697ec158142..92cce16fdae8 100644 --- a/packages/video-intelligence/src/v1beta1/doc/doc_google_protobuf_any.js +++ b/packages/video-intelligence/src/v1beta1/doc/doc_google_protobuf_any.js @@ -55,6 +55,16 @@ * any.Unpack(foo) * ... * + * Example 4: Pack and unpack a message in Go + * + * foo := &pb.Foo{...} + * any, err := ptypes.MarshalAny(foo) + * ... + * foo := &pb.Foo{} + * if err := ptypes.UnmarshalAny(any, foo); err != nil { + * ... 
+ * } + * * The pack methods provided by protobuf library will by default use * 'type.googleapis.com/full.type.name' as the type URL and the unpack * methods only use the fully qualified type name after the last '/' diff --git a/packages/video-intelligence/src/v1beta1/doc/doc_google_rpc_status.js b/packages/video-intelligence/src/v1beta1/doc/doc_google_rpc_status.js index c85f1befe902..e2614e9e2f3c 100644 --- a/packages/video-intelligence/src/v1beta1/doc/doc_google_rpc_status.js +++ b/packages/video-intelligence/src/v1beta1/doc/doc_google_rpc_status.js @@ -37,7 +37,7 @@ * error message is needed, put the localized message in the error details or * localize it in the client. The optional error details may contain arbitrary * information about the error. There is a predefined set of error detail types - * in the package `google.rpc` which can be used for common error conditions. + * in the package `google.rpc` that can be used for common error conditions. * * # Language mapping * @@ -60,7 +60,7 @@ * errors. * * - Workflow errors. A typical workflow has multiple steps. Each step may - * have a `Status` message for error reporting purpose. + * have a `Status` message for error reporting. * * - Batch operations. If a client uses batch request and batch response, the * `Status` message should be used directly inside batch response, one for @@ -83,8 +83,8 @@ * {@link google.rpc.Status.details} field, or localized by the client. * * @property {Object[]} details - * A list of messages that carry the error details. There will be a - * common set of message types for APIs to use. + * A list of messages that carry the error details. There is a common set of + * message types for APIs to use. 
* * This object should have the same structure as [google.protobuf.Any]{@link external:"google.protobuf.Any"} * diff --git a/packages/video-intelligence/src/v1beta2/doc/doc_google_protobuf_any.js b/packages/video-intelligence/src/v1beta2/doc/doc_google_protobuf_any.js new file mode 100644 index 000000000000..92cce16fdae8 --- /dev/null +++ b/packages/video-intelligence/src/v1beta2/doc/doc_google_protobuf_any.js @@ -0,0 +1,131 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * `Any` contains an arbitrary serialized protocol buffer message along with a + * URL that describes the type of the serialized message. + * + * Protobuf library provides support to pack/unpack Any values in the form + * of utility functions or additional generated methods of the Any type. + * + * Example 1: Pack and unpack a message in C++. + * + * Foo foo = ...; + * Any any; + * any.PackFrom(foo); + * ... + * if (any.UnpackTo(&foo)) { + * ... + * } + * + * Example 2: Pack and unpack a message in Java. + * + * Foo foo = ...; + * Any any = Any.pack(foo); + * ... + * if (any.is(Foo.class)) { + * foo = any.unpack(Foo.class); + * } + * + * Example 3: Pack and unpack a message in Python. + * + * foo = Foo(...) + * any = Any() + * any.Pack(foo) + * ... 
+ * if any.Is(Foo.DESCRIPTOR): + * any.Unpack(foo) + * ... + * + * Example 4: Pack and unpack a message in Go + * + * foo := &pb.Foo{...} + * any, err := ptypes.MarshalAny(foo) + * ... + * foo := &pb.Foo{} + * if err := ptypes.UnmarshalAny(any, foo); err != nil { + * ... + * } + * + * The pack methods provided by protobuf library will by default use + * 'type.googleapis.com/full.type.name' as the type URL and the unpack + * methods only use the fully qualified type name after the last '/' + * in the type URL, for example "foo.bar.com/x/y.z" will yield type + * name "y.z". + * + * + * # JSON + * + * The JSON representation of an `Any` value uses the regular + * representation of the deserialized, embedded message, with an + * additional field `@type` which contains the type URL. Example: + * + * package google.profile; + * message Person { + * string first_name = 1; + * string last_name = 2; + * } + * + * { + * "@type": "type.googleapis.com/google.profile.Person", + * "firstName": <string>, + * "lastName": <string> + * } + * + * If the embedded message type is well-known and has a custom JSON + * representation, that representation will be embedded adding a field + * `value` which holds the custom JSON in addition to the `@type` + * field. Example (for message {@link google.protobuf.Duration}): + * + * { + * "@type": "type.googleapis.com/google.protobuf.Duration", + * "value": "1.212s" + * } + * + * @external "google.protobuf.Any" + * @property {string} typeUrl + * A URL/resource name whose content describes the type of the + * serialized protocol buffer message. + * + * For URLs which use the scheme `http`, `https`, or no scheme, the + * following restrictions and interpretations apply: + * + * * If no scheme is provided, `https` is assumed. + * * The last segment of the URL's path must represent the fully + * qualified name of the type (as in `path/google.protobuf.Duration`). + * The name should be in a canonical form (e.g., leading "." is + * not accepted).
+ * * An HTTP GET on the URL must yield a {@link google.protobuf.Type} + * value in binary format, or produce an error. + * * Applications are allowed to cache lookup results based on the + * URL, or have them precompiled into a binary to avoid any + * lookup. Therefore, binary compatibility needs to be preserved + * on changes to types. (Use versioned type names to manage + * breaking changes.) + * + * Schemes other than `http`, `https` (or the empty scheme) might be + * used with implementation specific semantics. + * + * @property {string} value + * Must be a valid serialized protocol buffer of the above specified type. + * + * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto} + */ \ No newline at end of file diff --git a/packages/video-intelligence/src/v1beta2/doc/doc_google_protobuf_duration.js b/packages/video-intelligence/src/v1beta2/doc/doc_google_protobuf_duration.js new file mode 100644 index 000000000000..d4c9bfee5176 --- /dev/null +++ b/packages/video-intelligence/src/v1beta2/doc/doc_google_protobuf_duration.js @@ -0,0 +1,97 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. 
+ */ + +/** + * A Duration represents a signed, fixed-length span of time represented + * as a count of seconds and fractions of seconds at nanosecond + * resolution. It is independent of any calendar and concepts like "day" + * or "month". It is related to Timestamp in that the difference between + * two Timestamp values is a Duration and it can be added or subtracted + * from a Timestamp. Range is approximately +-10,000 years. + * + * # Examples + * + * Example 1: Compute Duration from two Timestamps in pseudo code. + * + * Timestamp start = ...; + * Timestamp end = ...; + * Duration duration = ...; + * + * duration.seconds = end.seconds - start.seconds; + * duration.nanos = end.nanos - start.nanos; + * + * if (duration.seconds < 0 && duration.nanos > 0) { + * duration.seconds += 1; + * duration.nanos -= 1000000000; + * } else if (duration.seconds > 0 && duration.nanos < 0) { + * duration.seconds -= 1; + * duration.nanos += 1000000000; + * } + * + * Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. + * + * Timestamp start = ...; + * Duration duration = ...; + * Timestamp end = ...; + * + * end.seconds = start.seconds + duration.seconds; + * end.nanos = start.nanos + duration.nanos; + * + * if (end.nanos < 0) { + * end.seconds -= 1; + * end.nanos += 1000000000; + * } else if (end.nanos >= 1000000000) { + * end.seconds += 1; + * end.nanos -= 1000000000; + * } + * + * Example 3: Compute Duration from datetime.timedelta in Python. + * + * td = datetime.timedelta(days=3, minutes=10) + * duration = Duration() + * duration.FromTimedelta(td) + * + * # JSON Mapping + * + * In JSON format, the Duration type is encoded as a string rather than an + * object, where the string ends in the suffix "s" (indicating seconds) and + * is preceded by the number of seconds, with nanoseconds expressed as + * fractional seconds.
For example, 3 seconds with 0 nanoseconds should be + * encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should + * be expressed in JSON format as "3.000000001s", and 3 seconds and 1 + * microsecond should be expressed in JSON format as "3.000001s". + * + * @external "google.protobuf.Duration" + * @property {number} seconds + * Signed seconds of the span of time. Must be from -315,576,000,000 + * to +315,576,000,000 inclusive. Note: these bounds are computed from: + * 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + * + * @property {number} nanos + * Signed fractions of a second at nanosecond resolution of the span + * of time. Durations less than one second are represented with a 0 + * `seconds` field and a positive or negative `nanos` field. For durations + * of one second or more, a non-zero value for the `nanos` field must be + * of the same sign as the `seconds` field. Must be from -999,999,999 + * to +999,999,999 inclusive. + * + * @see [google.protobuf.Duration definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/duration.proto} + */ \ No newline at end of file diff --git a/packages/video-intelligence/src/v1beta2/doc/doc_google_rpc_status.js b/packages/video-intelligence/src/v1beta2/doc/doc_google_rpc_status.js new file mode 100644 index 000000000000..e2614e9e2f3c --- /dev/null +++ b/packages/video-intelligence/src/v1beta2/doc/doc_google_rpc_status.js @@ -0,0 +1,92 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * The `Status` type defines a logical error model that is suitable for different + * programming environments, including REST APIs and RPC APIs. It is used by + * [gRPC](https://github.com/grpc). The error model is designed to be: + * + * - Simple to use and understand for most users + * - Flexible enough to meet unexpected needs + * + * # Overview + * + * The `Status` message contains three pieces of data: error code, error message, + * and error details. The error code should be an enum value of + * {@link google.rpc.Code}, but it may accept additional error codes if needed. The + * error message should be a developer-facing English message that helps + * developers *understand* and *resolve* the error. If a localized user-facing + * error message is needed, put the localized message in the error details or + * localize it in the client. The optional error details may contain arbitrary + * information about the error. There is a predefined set of error detail types + * in the package `google.rpc` that can be used for common error conditions. + * + * # Language mapping + * + * The `Status` message is the logical representation of the error model, but it + * is not necessarily the actual wire format. When the `Status` message is + * exposed in different client libraries and different wire protocols, it can be + * mapped differently. For example, it will likely be mapped to some exceptions + * in Java, but more likely mapped to some error codes in C. + * + * # Other uses + * + * The error model and the `Status` message can be used in a variety of + * environments, either with or without APIs, to provide a + * consistent developer experience across different environments. 
+ * + * Example uses of this error model include: + * + * - Partial errors. If a service needs to return partial errors to the client, + * it may embed the `Status` in the normal response to indicate the partial + * errors. + * + * - Workflow errors. A typical workflow has multiple steps. Each step may + * have a `Status` message for error reporting. + * + * - Batch operations. If a client uses batch request and batch response, the + * `Status` message should be used directly inside batch response, one for + * each error sub-response. + * + * - Asynchronous operations. If an API call embeds asynchronous operation + * results in its response, the status of those operations should be + * represented directly using the `Status` message. + * + * - Logging. If some API errors are stored in logs, the message `Status` could + * be used directly after any stripping needed for security/privacy reasons. + * + * @external "google.rpc.Status" + * @property {number} code + * The status code, which should be an enum value of {@link google.rpc.Code}. + * + * @property {string} message + * A developer-facing error message, which should be in English. Any + * user-facing error message should be localized and sent in the + * {@link google.rpc.Status.details} field, or localized by the client. + * + * @property {Object[]} details + * A list of messages that carry the error details. There is a common set of + * message types for APIs to use. 
+ * + * This object should have the same structure as [google.protobuf.Any]{@link external:"google.protobuf.Any"} + * + * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto} + */ \ No newline at end of file diff --git a/packages/video-intelligence/src/v1beta2/doc/doc_video_intelligence.js b/packages/video-intelligence/src/v1beta2/doc/doc_video_intelligence.js new file mode 100644 index 000000000000..30077bc21ff7 --- /dev/null +++ b/packages/video-intelligence/src/v1beta2/doc/doc_video_intelligence.js @@ -0,0 +1,631 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Note: this file is purely for documentation. Any contents are not expected + * to be loaded as the JS file. + */ + +/** + * Video annotation request. + * + * @property {string} inputUri + * Input video location. Currently, only + * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + * supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * {@link google.rpc.Code.INVALID_ARGUMENT}). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * A video URI may include wildcards in `object-id`, and thus identify + * multiple videos. Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. 
If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` should be unset. + * + * @property {string} inputContent + * The video data bytes. Encoding: base64. If unset, the input video(s) + * should be specified via `input_uri`. If set, `input_uri` should be unset. + * + * @property {number[]} features + * Requested video annotation features. + * + * The number should be among the values of [Feature]{@link Feature} + * + * @property {Object} videoContext + * Additional video context and/or feature-specific parameters. + * + * This object should have the same structure as [VideoContext]{@link VideoContext} + * + * @property {string} outputUri + * Optional location where the output (in JSON format) should be stored. + * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + * URIs are supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * {@link google.rpc.Code.INVALID_ARGUMENT}). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * + * @property {string} locationId + * Optional cloud region where annotation should take place. Supported cloud + * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + * is specified, a region will be determined based on video file location. + * + * @class + * @see [google.cloud.videointelligence.v1beta2.AnnotateVideoRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var AnnotateVideoRequest = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Video context and/or feature-specific parameters. + * + * @property {Object[]} segments + * Video segments to annotate. The segments may overlap and are not required + * to be contiguous or span the whole video. 
If unspecified, each video + * is treated as a single segment. + * + * This object should have the same structure as [VideoSegment]{@link VideoSegment} + * + * @property {Object} labelDetectionConfig + * Config for LABEL_DETECTION. + * + * This object should have the same structure as [LabelDetectionConfig]{@link LabelDetectionConfig} + * + * @property {Object} shotChangeDetectionConfig + * Config for SHOT_CHANGE_DETECTION. + * + * This object should have the same structure as [ShotChangeDetectionConfig]{@link ShotChangeDetectionConfig} + * + * @property {Object} explicitContentDetectionConfig + * Config for EXPLICIT_CONTENT_DETECTION. + * + * This object should have the same structure as [ExplicitContentDetectionConfig]{@link ExplicitContentDetectionConfig} + * + * @property {Object} faceDetectionConfig + * Config for FACE_DETECTION. + * + * This object should have the same structure as [FaceDetectionConfig]{@link FaceDetectionConfig} + * + * @class + * @see [google.cloud.videointelligence.v1beta2.VideoContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var VideoContext = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Config for LABEL_DETECTION. + * + * @property {number} labelDetectionMode + * What labels should be detected with LABEL_DETECTION, in addition to + * video-level labels or segment-level labels. + * If unspecified, defaults to `SHOT_MODE`. + * + * The number should be among the values of [LabelDetectionMode]{@link LabelDetectionMode} + * + * @property {boolean} stationaryCamera + * Whether the video has been shot from a stationary (i.e. non-moving) camera. + * When set to true, might improve detection accuracy for moving objects. + * Should be used with `SHOT_AND_FRAME_MODE` enabled. + * + * @property {string} model + * Model to use for label detection. 
+ * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". + * + * @class + * @see [google.cloud.videointelligence.v1beta2.LabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var LabelDetectionConfig = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Config for SHOT_CHANGE_DETECTION. + * + * @property {string} model + * Model to use for shot change detection. + * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". + * + * @class + * @see [google.cloud.videointelligence.v1beta2.ShotChangeDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var ShotChangeDetectionConfig = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Config for EXPLICIT_CONTENT_DETECTION. + * + * @property {string} model + * Model to use for explicit content detection. + * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". + * + * @class + * @see [google.cloud.videointelligence.v1beta2.ExplicitContentDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var ExplicitContentDetectionConfig = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Config for FACE_DETECTION. + * + * @property {string} model + * Model to use for face detection. + * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". + * + * @property {boolean} includeBoundingBoxes + * Whether bounding boxes should be included in the face annotation output.
+ * + * @class + * @see [google.cloud.videointelligence.v1beta2.FaceDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var FaceDetectionConfig = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Video segment. + * + * @property {Object} startTimeOffset + * Time-offset, relative to the beginning of the video, + * corresponding to the start of the segment (inclusive). + * + * This object should have the same structure as [google.protobuf.Duration]{@link external:"google.protobuf.Duration"} + * + * @property {Object} endTimeOffset + * Time-offset, relative to the beginning of the video, + * corresponding to the end of the segment (inclusive). + * + * This object should have the same structure as [google.protobuf.Duration]{@link external:"google.protobuf.Duration"} + * + * @class + * @see [google.cloud.videointelligence.v1beta2.VideoSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var VideoSegment = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Video segment level annotation results for label detection. + * + * @property {Object} segment + * Video segment where a label was detected. + * + * This object should have the same structure as [VideoSegment]{@link VideoSegment} + * + * @property {number} confidence + * Confidence that the label is accurate. Range: [0, 1]. + * + * @class + * @see [google.cloud.videointelligence.v1beta2.LabelSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var LabelSegment = { + // This is for documentation. Actual contents will be loaded by gRPC. 
+}; + +/** + * Video frame level annotation results for label detection. + * + * @property {Object} timeOffset + * Time-offset, relative to the beginning of the video, corresponding to the + * video frame for this location. + * + * This object should have the same structure as [google.protobuf.Duration]{@link external:"google.protobuf.Duration"} + * + * @property {number} confidence + * Confidence that the label is accurate. Range: [0, 1]. + * + * @class + * @see [google.cloud.videointelligence.v1beta2.LabelFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var LabelFrame = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Detected entity from video analysis. + * + * @property {string} entityId + * Opaque entity ID. Some IDs may be available in + * [Google Knowledge Graph Search + * API](https://developers.google.com/knowledge-graph/). + * + * @property {string} description + * Textual description, e.g. `Fixed-gear bicycle`. + * + * @property {string} languageCode + * Language code for `description` in BCP-47 format. + * + * @class + * @see [google.cloud.videointelligence.v1beta2.Entity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var Entity = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Label annotation. + * + * @property {Object} entity + * Detected entity. + * + * This object should have the same structure as [Entity]{@link Entity} + * + * @property {Object[]} categoryEntities + * Common categories for the detected entity. + * E.g. when the label is `Terrier` the category is likely `dog`. And in some + * cases there might be more than one categories e.g. `Terrier` could also be + * a `pet`. 
+ * + * This object should have the same structure as [Entity]{@link Entity} + * + * @property {Object[]} segments + * All video segments where a label was detected. + * + * This object should have the same structure as [LabelSegment]{@link LabelSegment} + * + * @property {Object[]} frames + * All video frames where a label was detected. + * + * This object should have the same structure as [LabelFrame]{@link LabelFrame} + * + * @class + * @see [google.cloud.videointelligence.v1beta2.LabelAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var LabelAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Video frame level annotation results for explicit content. + * + * @property {Object} timeOffset + * Time-offset, relative to the beginning of the video, corresponding to the + * video frame for this location. + * + * This object should have the same structure as [google.protobuf.Duration]{@link external:"google.protobuf.Duration"} + * + * @property {number} pornographyLikelihood + * Likelihood of the pornography content. + * + * The number should be among the values of [Likelihood]{@link Likelihood} + * + * @class + * @see [google.cloud.videointelligence.v1beta2.ExplicitContentFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto} + */ +var ExplicitContentFrame = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Explicit content annotation (based on per-frame visual signals only). + * If no explicit content has been detected in a frame, no annotations are + * present for that frame. + * + * @property {Object[]} frames + * All video frames where explicit content was detected.
/**
 * Explicit content annotation (based on per-frame visual signals only).
 * If no explicit content has been detected in a frame, no annotations
 * are present for that frame.
 * NOTE(review): the opening of this comment was truncated in the patch;
 * reconstructed from the v1beta2 proto — confirm against the source proto.
 *
 * @property {Object[]} frames
 *   All video frames where explicit content was detected.
 *
 *   This object should have the same structure as [ExplicitContentFrame]{@link ExplicitContentFrame}
 *
 * @class
 * @see [google.cloud.videointelligence.v1beta2.ExplicitContentAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
 */
var ExplicitContentAnnotation = {
  // This is for documentation. Actual contents will be loaded by gRPC.
};

/**
 * Normalized bounding box.
 * The normalized vertex coordinates are relative to the original image.
 * Range: [0, 1].
 *
 * @property {number} left
 *   Left X coordinate.
 *
 * @property {number} top
 *   Top Y coordinate.
 *
 * @property {number} right
 *   Right X coordinate.
 *
 * @property {number} bottom
 *   Bottom Y coordinate.
 *
 * @class
 * @see [google.cloud.videointelligence.v1beta2.NormalizedBoundingBox definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
 */
var NormalizedBoundingBox = {
  // This is for documentation. Actual contents will be loaded by gRPC.
};

/**
 * Video segment level annotation results for face detection.
 *
 * @property {Object} segment
 *   Video segment where a face was detected.
 *
 *   This object should have the same structure as [VideoSegment]{@link VideoSegment}
 *
 * @class
 * @see [google.cloud.videointelligence.v1beta2.FaceSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
 */
var FaceSegment = {
  // This is for documentation. Actual contents will be loaded by gRPC.
};

/**
 * Video frame level annotation results for face detection.
 *
 * @property {Object[]} normalizedBoundingBoxes
 *   Normalized bounding boxes in a frame.
 *   There can be more than one box if the same face is detected in multiple
 *   locations within the current frame.
 *
 *   This object should have the same structure as [NormalizedBoundingBox]{@link NormalizedBoundingBox}
 *
 * @property {Object} timeOffset
 *   Time-offset, relative to the beginning of the video,
 *   corresponding to the video frame for this location.
 *
 *   This object should have the same structure as [google.protobuf.Duration]{@link external:"google.protobuf.Duration"}
 *
 * @class
 * @see [google.cloud.videointelligence.v1beta2.FaceFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
 */
var FaceFrame = {
  // This is for documentation. Actual contents will be loaded by gRPC.
};

/**
 * Face annotation.
 *
 * @property {string} thumbnail
 *   Thumbnail of a representative face view (in JPEG format). Encoding: base64.
 *
 * @property {Object[]} segments
 *   All video segments where a face was detected.
 *
 *   This object should have the same structure as [FaceSegment]{@link FaceSegment}
 *
 * @property {Object[]} frames
 *   All video frames where a face was detected.
 *
 *   This object should have the same structure as [FaceFrame]{@link FaceFrame}
 *
 * @class
 * @see [google.cloud.videointelligence.v1beta2.FaceAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
 */
var FaceAnnotation = {
  // This is for documentation. Actual contents will be loaded by gRPC.
};

/**
 * Annotation results for a single video.
 *
 * @property {string} inputUri
 *   Video file location in
 *   [Google Cloud Storage](https://cloud.google.com/storage/).
 *
 * @property {Object[]} segmentLabelAnnotations
 *   Label annotations on video level or user specified segment level.
 *   There is exactly one element for each unique label.
 *
 *   This object should have the same structure as [LabelAnnotation]{@link LabelAnnotation}
 *
 * @property {Object[]} shotLabelAnnotations
 *   Label annotations on shot level.
 *   There is exactly one element for each unique label.
 *
 *   This object should have the same structure as [LabelAnnotation]{@link LabelAnnotation}
 *
 * @property {Object[]} frameLabelAnnotations
 *   Label annotations on frame level.
 *   There is exactly one element for each unique label.
 *
 *   This object should have the same structure as [LabelAnnotation]{@link LabelAnnotation}
 *
 * @property {Object[]} faceAnnotations
 *   Face annotations. There is exactly one element for each unique face.
 *
 *   This object should have the same structure as [FaceAnnotation]{@link FaceAnnotation}
 *
 * @property {Object[]} shotAnnotations
 *   Shot annotations. Each shot is represented as a video segment.
 *
 *   This object should have the same structure as [VideoSegment]{@link VideoSegment}
 *
 * @property {Object} explicitAnnotation
 *   Explicit content annotation.
 *
 *   This object should have the same structure as [ExplicitContentAnnotation]{@link ExplicitContentAnnotation}
 *
 * @property {Object} error
 *   If set, indicates an error. Note that for a single `AnnotateVideoRequest`
 *   some videos may succeed and some may fail.
 *
 *   This object should have the same structure as [google.rpc.Status]{@link external:"google.rpc.Status"}
 *
 * @class
 * @see [google.cloud.videointelligence.v1beta2.VideoAnnotationResults definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
 */
var VideoAnnotationResults = {
  // This is for documentation. Actual contents will be loaded by gRPC.
};

/**
 * Video annotation response. Included in the `response`
 * field of the `Operation` returned by the `GetOperation`
 * call of the `google::longrunning::Operations` service.
 *
 * @property {Object[]} annotationResults
 *   Annotation results for all videos specified in `AnnotateVideoRequest`.
 *
 *   This object should have the same structure as [VideoAnnotationResults]{@link VideoAnnotationResults}
 *
 * @class
 * @see [google.cloud.videointelligence.v1beta2.AnnotateVideoResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
 */
var AnnotateVideoResponse = {
  // This is for documentation. Actual contents will be loaded by gRPC.
};

/**
 * Annotation progress for a single video.
 *
 * @property {string} inputUri
 *   Video file location in
 *   [Google Cloud Storage](https://cloud.google.com/storage/).
 *
 * @property {number} progressPercent
 *   Approximate percentage processed thus far.
 *   Guaranteed to be 100 when fully processed.
 *
 * @property {Object} startTime
 *   Time when the request was received.
 *
 *   This object should have the same structure as [google.protobuf.Timestamp]{@link external:"google.protobuf.Timestamp"}
 *
 * @property {Object} updateTime
 *   Time of the most recent update.
 *
 *   This object should have the same structure as [google.protobuf.Timestamp]{@link external:"google.protobuf.Timestamp"}
 *
 * @class
 * @see [google.cloud.videointelligence.v1beta2.VideoAnnotationProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
 */
var VideoAnnotationProgress = {
  // This is for documentation. Actual contents will be loaded by gRPC.
};

/**
 * Video annotation progress. Included in the `metadata`
 * field of the `Operation` returned by the `GetOperation`
 * call of the `google::longrunning::Operations` service.
 *
 * @property {Object[]} annotationProgress
 *   Progress metadata for all videos specified in `AnnotateVideoRequest`.
 *
 *   This object should have the same structure as [VideoAnnotationProgress]{@link VideoAnnotationProgress}
 *
 * @class
 * @see [google.cloud.videointelligence.v1beta2.AnnotateVideoProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto}
 */
var AnnotateVideoProgress = {
  // This is for documentation. Actual contents will be loaded by gRPC.
};

/**
 * Video annotation feature.
 *
 * @enum {number}
 */
var Feature = {

  /**
   * Unspecified.
   */
  FEATURE_UNSPECIFIED: 0,

  /**
   * Label detection. Detect objects, such as dog or flower.
   */
  LABEL_DETECTION: 1,

  /**
   * Shot change detection.
   */
  SHOT_CHANGE_DETECTION: 2,

  /**
   * Explicit content detection.
   */
  EXPLICIT_CONTENT_DETECTION: 3,

  /**
   * Human face detection and tracking.
   */
  FACE_DETECTION: 4
};

/**
 * Label detection mode.
 *
 * @enum {number}
 */
var LabelDetectionMode = {

  /**
   * Unspecified.
   */
  LABEL_DETECTION_MODE_UNSPECIFIED: 0,

  /**
   * Detect shot-level labels.
   */
  SHOT_MODE: 1,

  /**
   * Detect frame-level labels.
   */
  FRAME_MODE: 2,

  /**
   * Detect both shot-level and frame-level labels.
   */
  SHOT_AND_FRAME_MODE: 3
};

/**
 * Bucketized representation of likelihood.
 *
 * @enum {number}
 */
var Likelihood = {

  /**
   * Unspecified likelihood.
   */
  LIKELIHOOD_UNSPECIFIED: 0,

  /**
   * Very unlikely.
   */
  VERY_UNLIKELY: 1,

  /**
   * Unlikely.
   */
  UNLIKELY: 2,

  /**
   * Possible.
   */
  POSSIBLE: 3,

  /**
   * Likely.
   */
  LIKELY: 4,

  /**
   * Very likely.
   */
  VERY_LIKELY: 5
};
'use strict';

var extend = require('extend');
var gax = require('google-gax');
var videoIntelligenceServiceClient = require('./video_intelligence_service_client');

/**
 * Entry point for the v1beta2 Video Intelligence API surface.
 *
 * Merges the default scopes into the caller's options, creates the gax gRPC
 * wrapper, and returns the service client builder for it.
 *
 * @param {Object=} options - Optional settings (auth, scopes, servicePath,
 *   port, sslCreds, clientConfig) forwarded to `gax.grpc`.
 * @returns {Object} builder exposing `videoIntelligenceServiceClient(opts)`.
 */
function v1beta2(options) {
  var settings = extend({scopes: v1beta2.ALL_SCOPES}, options);
  return videoIntelligenceServiceClient(gax.grpc(settings));
}

// Version of the code generator that produced this package.
v1beta2.GAPIC_VERSION = '0.0.5';
// Re-exported service constants from the client module.
v1beta2.SERVICE_ADDRESS = videoIntelligenceServiceClient.SERVICE_ADDRESS;
v1beta2.ALL_SCOPES = videoIntelligenceServiceClient.ALL_SCOPES;

module.exports = v1beta2;
/*
 * Copyright 2017, Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * EDITING INSTRUCTIONS
 * This file was generated from the file
 * https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1beta2/video_intelligence.proto,
 * and updates to that file get reflected here through a refresh process.
 * For the short term, the refresh process will only be runnable by Google
 * engineers.
 *
 * The only allowed edits are to method and file documentation. A 3-way
 * merge preserves those additions if the generated source changes.
 */
/* TODO: introduce line-wrapping so that it never exceeds the limit. */
/* jscs: disable maximumLineLength */
'use strict';

var configData = require('./video_intelligence_service_client_config');
var extend = require('extend');
var gax = require('google-gax');

// Default API endpoint host and TLS port.
var SERVICE_ADDRESS = 'videointelligence.googleapis.com';
var DEFAULT_SERVICE_PORT = 443;

// Generator name/version, reported in the x-goog-api-client header.
var CODE_GEN_NAME_VERSION = 'gapic/0.0.5';

/**
 * The scopes needed to make gRPC calls to all of the methods defined in
 * this service.
 */
var ALL_SCOPES = ['https://www.googleapis.com/auth/cloud-platform'];
/**
 * Service that implements Google Cloud Video Intelligence API.
 *
 * Wires together the gRPC stub (created lazily via a promise), the per-method
 * call defaults from the bundled client config, and the long-running
 * operations client used to poll `annotateVideo` results.
 *
 * @class
 * @param {Object} gaxGrpc - gax gRPC wrapper (result of `gax.grpc(...)`).
 * @param {Object} grpcClients - Loaded proto namespace containing the
 *   v1beta2 service definition and message types.
 * @param {Object=} opts - `servicePath`, `port`, `clientConfig`, and
 *   optional `libName`/`libVersion` overrides.
 */
function VideoIntelligenceServiceClient(gaxGrpc, grpcClients, opts) {
  // Merge caller options over the generated defaults.
  opts = extend({
    servicePath: SERVICE_ADDRESS,
    port: DEFAULT_SERVICE_PORT,
    clientConfig: {}
  }, opts);

  // Components of the `x-goog-api-client` header (runtime, wrapper lib,
  // generator, gax, and grpc versions), joined with spaces below.
  var googleApiClient = [
    'gl-node/' + process.versions.node
  ];
  if (opts.libName && opts.libVersion) {
    googleApiClient.push(opts.libName + '/' + opts.libVersion);
  }
  googleApiClient.push(
    CODE_GEN_NAME_VERSION,
    'gax/' + gax.version,
    'grpc/' + gaxGrpc.grpcVersion
  );

  // Client for the google.longrunning.Operations service, used to poll
  // the operations returned by annotateVideo.
  this.operationsClient = new gax.lro({
    auth: gaxGrpc.auth,
    grpc: gaxGrpc.grpc
  }).operationsClient(opts);

  // Tells gax how to decode the Operation's response and metadata protos
  // for the annotateVideo long-running call.
  this.longrunningDescriptors = {
    annotateVideo: new gax.LongrunningDescriptor(
      this.operationsClient,
      grpcClients.google.cloud.videointelligence.v1beta2.AnnotateVideoResponse.decode,
      grpcClients.google.cloud.videointelligence.v1beta2.AnnotateVideoProgress.decode)
  };

  // Per-method call settings (timeout/retry) from the client config JSON,
  // overridable via opts.clientConfig.
  var defaults = gaxGrpc.constructSettings(
    'google.cloud.videointelligence.v1beta2.VideoIntelligenceService',
    configData,
    opts.clientConfig,
    {'x-goog-api-client': googleApiClient.join(' ')});

  var self = this;

  this.auth = gaxGrpc.auth;
  // createStub returns a promise for the gRPC stub; the wrappers below
  // defer each call until it resolves.
  var videoIntelligenceServiceStub = gaxGrpc.createStub(
    grpcClients.google.cloud.videointelligence.v1beta2.VideoIntelligenceService,
    opts);
  var videoIntelligenceServiceStubMethods = [
    'annotateVideo'
  ];
  videoIntelligenceServiceStubMethods.forEach(function(methodName) {
    // Expose each stub method as `_<name>`, wrapped by gax with the call
    // defaults and the matching long-running descriptor.
    self['_' + methodName] = gax.createApiCall(
      videoIntelligenceServiceStub.then(function(videoIntelligenceServiceStub) {
        return function() {
          var args = Array.prototype.slice.call(arguments, 0);
          return videoIntelligenceServiceStub[methodName].apply(videoIntelligenceServiceStub, args);
        };
      }),
      defaults[methodName],
      self.longrunningDescriptors[methodName]);
  });
}
/**
 * Get the project ID used by this class.
 *
 * @param {function(Error, string)} callback - called with the current
 *   project Id.
 */
VideoIntelligenceServiceClient.prototype.getProjectId = function(callback) {
  return this.auth.getProjectId(callback);
};

// Service calls

/**
 * Performs asynchronous video annotation. Progress and results can be
 * retrieved through the `google.longrunning.Operations` interface.
 * `Operation.metadata` contains `AnnotateVideoProgress` (progress).
 * `Operation.response` contains `AnnotateVideoResponse` (results).
 *
 * @param {Object} request
 *   The request object that will be sent.
 * @param {string} request.inputUri
 *   Input video location. Currently, only
 *   [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
 *   supported, which must be specified in the following format:
 *   `gs://bucket-id/object-id` (other URI formats return
 *   {@link google.rpc.Code.INVALID_ARGUMENT}). For more information, see
 *   [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
 *   A video URI may include wildcards in `object-id`, and thus identify
 *   multiple videos. Supported wildcards: '*' to match 0 or more characters;
 *   '?' to match 1 character. If unset, the input video should be embedded
 *   in the request as `input_content`. If set, `input_content` should be unset.
 * @param {number[]} request.features
 *   Requested video annotation features.
 *
 *   The number should be among the values of [Feature]{@link Feature}
 * @param {string=} request.inputContent
 *   The video data bytes. Encoding: base64. If unset, the input video(s)
 *   should be specified via `input_uri`. If set, `input_uri` should be unset.
 * @param {Object=} request.videoContext
 *   Additional video context and/or feature-specific parameters.
 *
 *   This object should have the same structure as [VideoContext]{@link VideoContext}
 * @param {string=} request.outputUri
 *   Optional location where the output (in JSON format) should be stored.
 *   Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
 *   URIs are supported, which must be specified in the following format:
 *   `gs://bucket-id/object-id` (other URI formats return
 *   {@link google.rpc.Code.INVALID_ARGUMENT}). For more information, see
 *   [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
 * @param {string=} request.locationId
 *   Optional cloud region where annotation should take place. Supported cloud
 *   regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
 *   is specified, a region will be determined based on video file location.
 * @param {Object=} options
 *   Optional parameters. You can override the default settings for this call,
 *   e.g, timeout, retries, paginations, etc. See
 *   [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions}
 *   for the details.
 * @param {function(?Error, ?Object)=} callback
 *   The function which will be called with the result of the API call.
 *
 *   The second parameter to the callback is a
 *   [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/Operation} object.
 * @return {Promise} - The promise which resolves to an array.
 *   The first element of the array is a
 *   [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/Operation} object.
 *   The promise has a method named "cancel" which cancels the ongoing API call.
 *
 * @example
 *
 * var videoIntelligence = require('@google-cloud/video-intelligence');
 *
 * var client = videoIntelligence.v1beta2({
 *   // optional auth parameters.
 * });
 *
 * var request = {
 *   inputUri: '',
 *   features: []
 * };
 *
 * // Handle the operation using the promise pattern.
 * client.annotateVideo(request).then(function(responses) {
 *   var operation = responses[0];
 *   var initialApiResponse = responses[1];
 *
 *   // Operation#promise starts polling for the completion of the LRO.
 *   return operation.promise();
 * }).then(function(responses) {
 *   var result = responses[0];           // final result of the operation
 *   var metadata = responses[1];         // metadata of the completed operation
 *   var finalApiResponse = responses[2]; // full Operation resource
 * })
 * .catch(function(err) {
 *   console.error(err);
 * });
 *
 * // Alternatively, handle the operation using the event emitter pattern:
 * client.annotateVideo(request).then(function(responses) {
 *   var operation = responses[0];
 *
 *   // Listening for "complete" starts polling for completion.
 *   operation.on('complete', function(result, metadata, finalApiResponse) {
 *     // doSomethingWith(result);
 *   });
 *
 *   // "progress" fires on any metadata change while polling.
 *   operation.on('progress', function(metadata, apiResponse) {
 *     // doSomethingWith(metadata)
 *   });
 *
 *   // "error" fires on any polling error.
 *   operation.on('error', function(err) {
 *     // throw(err);
 *   });
 * })
 * .catch(function(err) {
 *   console.error(err);
 * });
 */
VideoIntelligenceServiceClient.prototype.annotateVideo = function(request, options, callback) {
  // Support the annotateVideo(request, callback) calling convention.
  if (callback === undefined && options instanceof Function) {
    callback = options;
    options = {};
  } else if (options === undefined) {
    options = {};
  }

  return this._annotateVideo(request, options, callback);
};

/**
 * Builder for {@link VideoIntelligenceServiceClient}. Loads the v1beta2
 * proto, mixes the generated message namespace into itself, and exposes a
 * factory for constructing clients bound to the given gax gRPC wrapper.
 *
 * May be called with or without `new`.
 */
function VideoIntelligenceServiceClientBuilder(gaxGrpc) {
  if (!(this instanceof VideoIntelligenceServiceClientBuilder)) {
    return new VideoIntelligenceServiceClientBuilder(gaxGrpc);
  }

  var grpcClients = gaxGrpc.load([{
    root: require('google-proto-files')('..'),
    file: 'google/cloud/videointelligence/v1beta2/video_intelligence.proto'
  }]);
  extend(this, grpcClients.google.cloud.videointelligence.v1beta2);

  /**
   * Build a new instance of {@link VideoIntelligenceServiceClient}.
   *
   * @param {Object=} opts - The optional parameters.
   * @param {String=} opts.servicePath
   *   The domain name of the API remote host.
   * @param {number=} opts.port
   *   The port on which to connect to the remote host.
   * @param {grpc.ClientCredentials=} opts.sslCreds
   *   A ClientCredentials for use with an SSL-enabled channel.
   * @param {Object=} opts.clientConfig
   *   The customized config to build the call settings. See
   *   {@link gax.constructSettings} for the format.
   */
  this.videoIntelligenceServiceClient = function(opts) {
    return new VideoIntelligenceServiceClient(gaxGrpc, grpcClients, opts);
  };
  extend(this.videoIntelligenceServiceClient, VideoIntelligenceServiceClient);
}
+ */ + this.videoIntelligenceServiceClient = function(opts) { + return new VideoIntelligenceServiceClient(gaxGrpc, videoIntelligenceServiceClient, opts); + }; + extend(this.videoIntelligenceServiceClient, VideoIntelligenceServiceClient); +} +module.exports = VideoIntelligenceServiceClientBuilder; +module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS; +module.exports.ALL_SCOPES = ALL_SCOPES; \ No newline at end of file diff --git a/packages/video-intelligence/src/v1beta2/video_intelligence_service_client_config.json b/packages/video-intelligence/src/v1beta2/video_intelligence_service_client_config.json new file mode 100644 index 000000000000..b52c46ea68c7 --- /dev/null +++ b/packages/video-intelligence/src/v1beta2/video_intelligence_service_client_config.json @@ -0,0 +1,31 @@ +{ + "interfaces": { + "google.cloud.videointelligence.v1beta2.VideoIntelligenceService": { + "retry_codes": { + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ], + "non_idempotent": [] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 2.5, + "max_retry_delay_millis": 120000, + "initial_rpc_timeout_millis": 120000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 120000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "AnnotateVideo": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/packages/video-intelligence/test/gapic-v1beta2.js b/packages/video-intelligence/test/gapic-v1beta2.js new file mode 100644 index 000000000000..e8d196874dbd --- /dev/null +++ b/packages/video-intelligence/test/gapic-v1beta2.js @@ -0,0 +1,113 @@ +/* + * Copyright 2017, Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/*
 * Copyright 2017, Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
'use strict';

var assert = require('assert');
var videoIntelligence = require('../src');

// Arbitrary non-OK gRPC status code attached to the mock error below.
var FAKE_STATUS_CODE = 1;
var error = new Error();
error.code = FAKE_STATUS_CODE;

describe('VideoIntelligenceServiceClient', function() {
  describe('annotateVideo', function() {
    it('invokes annotateVideo without error', function(done) {
      var client = videoIntelligence.v1beta2();

      // Mock request
      var inputUri = 'inputUri1707300727';
      var features = [];
      var request = {
        inputUri: inputUri,
        features: features
      };

      // Mock response
      var expectedResponse = {};

      // Mock Grpc layer
      client._annotateVideo = mockLongRunningGrpcMethod(request, expectedResponse);

      client.annotateVideo(request).then(function(responses) {
        var operation = responses[0];
        return operation.promise();
      }).then(function(responses) {
        assert.deepStrictEqual(responses[0], expectedResponse);
        done();
      }).catch(function(err) {
        done(err);
      });
    });

    it('invokes annotateVideo with error', function(done) {
      var client = videoIntelligence.v1beta2();

      // Mock request
      var inputUri = 'inputUri1707300727';
      var features = [];
      var request = {
        inputUri: inputUri,
        features: features
      };

      // Mock Grpc layer
      client._annotateVideo = mockLongRunningGrpcMethod(request, null, error);

      client.annotateVideo(request).then(function(responses) {
        var operation = responses[0];
        return operation.promise();
      }).then(function() {
        // Previously assert.fail() was used here; its AssertionError fell
        // through to the .catch below, where the err.code check would throw
        // again and leave an unhandled rejection (mocha timeout). Report the
        // unexpected success directly instead.
        done(new Error('annotateVideo should have failed'));
      }).catch(function(err) {
        try {
          assert(err instanceof Error);
          assert.equal(err.code, FAKE_STATUS_CODE);
          done();
        } catch (assertionErr) {
          // Surface assertion failures as a clean test failure rather than
          // an unhandled rejection.
          done(assertionErr);
        }
      });
    });
  });

});

// Mocks a unary gRPC method: validates the request, then replies with
// `response`, fails with `error`, or calls back empty. Currently unused in
// this suite but kept for parity with other generated test files.
function mockSimpleGrpcMethod(expectedRequest, response, error) {
  return function(actualRequest, options, callback) {
    assert.deepStrictEqual(actualRequest, expectedRequest);
    if (error) {
      callback(error);
    } else if (response) {
      callback(null, response);
    } else {
      callback(null);
    }
  };
}

// Mocks a long-running gRPC method: validates the request and resolves with
// a fake Operation whose promise() settles with [response] or rejects with
// `error`.
function mockLongRunningGrpcMethod(expectedRequest, response, error) {
  return function(request) {
    assert.deepStrictEqual(request, expectedRequest);
    var mockOperation = {
      promise: function() {
        return new Promise(function(resolve, reject) {
          if (error) {
            reject(error);
          } else {
            resolve([response]);
          }
        });
      }
    };
    return Promise.resolve([mockOperation]);
  };
}