diff --git a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1/video_intelligence.proto
index ee7d618fbc2..9dcfda55edd 100644
--- a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1/video_intelligence.proto
+++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1/video_intelligence.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
 
@@ -148,6 +147,9 @@ enum Feature {
 
   // Object detection and tracking.
   OBJECT_TRACKING = 9;
+
+  // Logo detection, tracking, and recognition.
+  LOGO_RECOGNITION = 12;
 }
 
 // Label detection mode.
@@ -397,6 +399,67 @@ message FaceAnnotation {
   repeated FaceFrame frames = 3;
 }
 
+// For tracking related features.
+// An object at time_offset with attributes, and located with
+// normalized_bounding_box.
+message TimestampedObject {
+  // Normalized bounding box in a frame, where the object is located.
+  NormalizedBoundingBox normalized_bounding_box = 1;
+
+  // Time-offset, relative to the beginning of the video,
+  // corresponding to the video frame for this object.
+  google.protobuf.Duration time_offset = 2;
+
+  // Optional. The attributes of the object in the bounding box.
+  repeated DetectedAttribute attributes = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The detected landmarks.
+  repeated DetectedLandmark landmarks = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A track of an object instance.
+message Track {
+  // Video segment of a track.
+  VideoSegment segment = 1;
+
+  // The object with timestamp and attributes per frame in the track.
+  repeated TimestampedObject timestamped_objects = 2;
+
+  // Optional. Attributes in the track level.
+  repeated DetectedAttribute attributes = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The confidence score of the tracked object.
+  float confidence = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A generic detected attribute represented by name in string format.
+message DetectedAttribute {
+  // The name of the attribute, e.g. glasses, dark_glasses, mouth_open.
+  // A full list of supported type names will be provided in the document.
+  string name = 1;
+
+  // Detected attribute confidence. Range [0, 1].
+  float confidence = 2;
+
+  // Text value of the detection result. For example, the value for "HairColor"
+  // can be "black", "blonde", etc.
+  string value = 3;
+}
+
+// A generic detected landmark represented by name in string format and a 2D
+// location.
+message DetectedLandmark {
+  // The name of this landmark, e.g. left_hand, right_shoulder.
+  string name = 1;
+
+  // The 2D point of the detected landmark using the normalized image
+  // coordinate system. The normalized coordinates have the range from 0 to 1.
+  NormalizedVertex point = 2;
+
+  // The confidence score of the detected landmark. Range [0, 1].
+  float confidence = 3;
+}
+
 // Annotation results for a single video.
message VideoAnnotationResults { // Video file location in @@ -453,6 +516,9 @@ message VideoAnnotationResults { // Annotations for list of objects detected and tracked in video. repeated ObjectTrackingAnnotation object_annotations = 14; + // Annotations for list of logos detected, tracked and recognized in video. + repeated LogoRecognitionAnnotation logo_recognition_annotations = 19; + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` // some videos may succeed and some may fail. google.rpc.Status error = 9; @@ -743,3 +809,18 @@ message ObjectTrackingAnnotation { // Streaming mode: it can only be one ObjectTrackingFrame message in frames. repeated ObjectTrackingFrame frames = 2; } + +// Annotation corresponding to one detected, tracked and recognized logo class. +message LogoRecognitionAnnotation { + // Entity category information to specify the logo class that all the logo + // tracks within this LogoRecognitionAnnotation are recognized as. + Entity entity = 1; + + // All logo tracks where the recognized logo appears. Each track corresponds + // to one logo instance appearing in consecutive frames. + repeated Track tracks = 2; + + // All video segments where the recognized logo appears. There might be + // multiple instances of the same logo class appearing in one VideoSegment. + repeated VideoSegment segments = 3; +} diff --git a/packages/google-cloud-videointelligence/protos/protos.d.ts b/packages/google-cloud-videointelligence/protos/protos.d.ts index 7be3137049e..202e9a8d51a 100644 --- a/packages/google-cloud-videointelligence/protos/protos.d.ts +++ b/packages/google-cloud-videointelligence/protos/protos.d.ts @@ -332,7 +332,8 @@ export namespace google { FACE_DETECTION = 4, SPEECH_TRANSCRIPTION = 6, TEXT_DETECTION = 7, - OBJECT_TRACKING = 9 + OBJECT_TRACKING = 9, + LOGO_RECOGNITION = 12 } /** LabelDetectionMode enum. */ @@ -2009,6 +2010,426 @@ export namespace google { public toJSON(): { [k: string]: any }; } + /** Properties of a TimestampedObject. */ + interface ITimestampedObject { + + /** TimestampedObject normalizedBoundingBox */ + normalizedBoundingBox?: (google.cloud.videointelligence.v1.INormalizedBoundingBox|null); + + /** TimestampedObject timeOffset */ + timeOffset?: (google.protobuf.IDuration|null); + + /** TimestampedObject attributes */ + attributes?: (google.cloud.videointelligence.v1.IDetectedAttribute[]|null); + + /** TimestampedObject landmarks */ + landmarks?: (google.cloud.videointelligence.v1.IDetectedLandmark[]|null); + } + + /** Represents a TimestampedObject. */ + class TimestampedObject implements ITimestampedObject { + + /** + * Constructs a new TimestampedObject. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.videointelligence.v1.ITimestampedObject); + + /** TimestampedObject normalizedBoundingBox. */ + public normalizedBoundingBox?: (google.cloud.videointelligence.v1.INormalizedBoundingBox|null); + + /** TimestampedObject timeOffset. */ + public timeOffset?: (google.protobuf.IDuration|null); + + /** TimestampedObject attributes. */ + public attributes: google.cloud.videointelligence.v1.IDetectedAttribute[]; + + /** TimestampedObject landmarks. */ + public landmarks: google.cloud.videointelligence.v1.IDetectedLandmark[]; + + /** + * Creates a new TimestampedObject instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns TimestampedObject instance + */ + public static create(properties?: google.cloud.videointelligence.v1.ITimestampedObject): google.cloud.videointelligence.v1.TimestampedObject; + + /** + * Encodes the specified TimestampedObject message. Does not implicitly {@link google.cloud.videointelligence.v1.TimestampedObject.verify|verify} messages. + * @param message TimestampedObject message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.videointelligence.v1.ITimestampedObject, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TimestampedObject message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.TimestampedObject.verify|verify} messages. + * @param message TimestampedObject message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.videointelligence.v1.ITimestampedObject, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TimestampedObject message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TimestampedObject + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.videointelligence.v1.TimestampedObject; + + /** + * Decodes a TimestampedObject message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns TimestampedObject + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.videointelligence.v1.TimestampedObject; + + /** + * Verifies a TimestampedObject message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a TimestampedObject message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns TimestampedObject + */ + public static fromObject(object: { [k: string]: any }): google.cloud.videointelligence.v1.TimestampedObject; + + /** + * Creates a plain object from a TimestampedObject message. Also converts values to other types if specified. + * @param message TimestampedObject + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.videointelligence.v1.TimestampedObject, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this TimestampedObject to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + + /** Properties of a Track. 
*/ + interface ITrack { + + /** Track segment */ + segment?: (google.cloud.videointelligence.v1.IVideoSegment|null); + + /** Track timestampedObjects */ + timestampedObjects?: (google.cloud.videointelligence.v1.ITimestampedObject[]|null); + + /** Track attributes */ + attributes?: (google.cloud.videointelligence.v1.IDetectedAttribute[]|null); + + /** Track confidence */ + confidence?: (number|null); + } + + /** Represents a Track. */ + class Track implements ITrack { + + /** + * Constructs a new Track. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.videointelligence.v1.ITrack); + + /** Track segment. */ + public segment?: (google.cloud.videointelligence.v1.IVideoSegment|null); + + /** Track timestampedObjects. */ + public timestampedObjects: google.cloud.videointelligence.v1.ITimestampedObject[]; + + /** Track attributes. */ + public attributes: google.cloud.videointelligence.v1.IDetectedAttribute[]; + + /** Track confidence. */ + public confidence: number; + + /** + * Creates a new Track instance using the specified properties. + * @param [properties] Properties to set + * @returns Track instance + */ + public static create(properties?: google.cloud.videointelligence.v1.ITrack): google.cloud.videointelligence.v1.Track; + + /** + * Encodes the specified Track message. Does not implicitly {@link google.cloud.videointelligence.v1.Track.verify|verify} messages. + * @param message Track message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.videointelligence.v1.ITrack, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Track message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.Track.verify|verify} messages. + * @param message Track message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.videointelligence.v1.ITrack, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Track message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Track + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.videointelligence.v1.Track; + + /** + * Decodes a Track message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Track + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.videointelligence.v1.Track; + + /** + * Verifies a Track message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a Track message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Track + */ + public static fromObject(object: { [k: string]: any }): google.cloud.videointelligence.v1.Track; + + /** + * Creates a plain object from a Track message. Also converts values to other types if specified. 
+ * @param message Track + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.videointelligence.v1.Track, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Track to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + + /** Properties of a DetectedAttribute. */ + interface IDetectedAttribute { + + /** DetectedAttribute name */ + name?: (string|null); + + /** DetectedAttribute confidence */ + confidence?: (number|null); + + /** DetectedAttribute value */ + value?: (string|null); + } + + /** Represents a DetectedAttribute. */ + class DetectedAttribute implements IDetectedAttribute { + + /** + * Constructs a new DetectedAttribute. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.videointelligence.v1.IDetectedAttribute); + + /** DetectedAttribute name. */ + public name: string; + + /** DetectedAttribute confidence. */ + public confidence: number; + + /** DetectedAttribute value. */ + public value: string; + + /** + * Creates a new DetectedAttribute instance using the specified properties. + * @param [properties] Properties to set + * @returns DetectedAttribute instance + */ + public static create(properties?: google.cloud.videointelligence.v1.IDetectedAttribute): google.cloud.videointelligence.v1.DetectedAttribute; + + /** + * Encodes the specified DetectedAttribute message. Does not implicitly {@link google.cloud.videointelligence.v1.DetectedAttribute.verify|verify} messages. + * @param message DetectedAttribute message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.videointelligence.v1.IDetectedAttribute, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DetectedAttribute message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.DetectedAttribute.verify|verify} messages. + * @param message DetectedAttribute message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.videointelligence.v1.IDetectedAttribute, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DetectedAttribute message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DetectedAttribute + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.videointelligence.v1.DetectedAttribute; + + /** + * Decodes a DetectedAttribute message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DetectedAttribute + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.videointelligence.v1.DetectedAttribute; + + /** + * Verifies a DetectedAttribute message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DetectedAttribute message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns DetectedAttribute + */ + public static fromObject(object: { [k: string]: any }): google.cloud.videointelligence.v1.DetectedAttribute; + + /** + * Creates a plain object from a DetectedAttribute message. Also converts values to other types if specified. + * @param message DetectedAttribute + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.videointelligence.v1.DetectedAttribute, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DetectedAttribute to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + + /** Properties of a DetectedLandmark. */ + interface IDetectedLandmark { + + /** DetectedLandmark name */ + name?: (string|null); + + /** DetectedLandmark point */ + point?: (google.cloud.videointelligence.v1.INormalizedVertex|null); + + /** DetectedLandmark confidence */ + confidence?: (number|null); + } + + /** Represents a DetectedLandmark. */ + class DetectedLandmark implements IDetectedLandmark { + + /** + * Constructs a new DetectedLandmark. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.videointelligence.v1.IDetectedLandmark); + + /** DetectedLandmark name. */ + public name: string; + + /** DetectedLandmark point. */ + public point?: (google.cloud.videointelligence.v1.INormalizedVertex|null); + + /** DetectedLandmark confidence. */ + public confidence: number; + + /** + * Creates a new DetectedLandmark instance using the specified properties. + * @param [properties] Properties to set + * @returns DetectedLandmark instance + */ + public static create(properties?: google.cloud.videointelligence.v1.IDetectedLandmark): google.cloud.videointelligence.v1.DetectedLandmark; + + /** + * Encodes the specified DetectedLandmark message. Does not implicitly {@link google.cloud.videointelligence.v1.DetectedLandmark.verify|verify} messages. + * @param message DetectedLandmark message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.videointelligence.v1.IDetectedLandmark, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DetectedLandmark message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.DetectedLandmark.verify|verify} messages. + * @param message DetectedLandmark message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.videointelligence.v1.IDetectedLandmark, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DetectedLandmark message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DetectedLandmark + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.videointelligence.v1.DetectedLandmark; + + /** + * Decodes a DetectedLandmark message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns DetectedLandmark + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.videointelligence.v1.DetectedLandmark; + + /** + * Verifies a DetectedLandmark message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DetectedLandmark message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns DetectedLandmark + */ + public static fromObject(object: { [k: string]: any }): google.cloud.videointelligence.v1.DetectedLandmark; + + /** + * Creates a plain object from a DetectedLandmark message. Also converts values to other types if specified. + * @param message DetectedLandmark + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.videointelligence.v1.DetectedLandmark, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DetectedLandmark to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + /** Properties of a VideoAnnotationResults. */ interface IVideoAnnotationResults { @@ -2051,6 +2472,9 @@ export namespace google { /** VideoAnnotationResults objectAnnotations */ objectAnnotations?: (google.cloud.videointelligence.v1.IObjectTrackingAnnotation[]|null); + /** VideoAnnotationResults logoRecognitionAnnotations */ + logoRecognitionAnnotations?: (google.cloud.videointelligence.v1.ILogoRecognitionAnnotation[]|null); + /** VideoAnnotationResults error */ error?: (google.rpc.IStatus|null); } @@ -2103,6 +2527,9 @@ export namespace google { /** VideoAnnotationResults objectAnnotations. */ public objectAnnotations: google.cloud.videointelligence.v1.IObjectTrackingAnnotation[]; + /** VideoAnnotationResults logoRecognitionAnnotations. */ + public logoRecognitionAnnotations: google.cloud.videointelligence.v1.ILogoRecognitionAnnotation[]; + /** VideoAnnotationResults error. */ public error?: (google.rpc.IStatus|null); @@ -3709,6 +4136,108 @@ export namespace google { */ public toJSON(): { [k: string]: any }; } + + /** Properties of a LogoRecognitionAnnotation. */ + interface ILogoRecognitionAnnotation { + + /** LogoRecognitionAnnotation entity */ + entity?: (google.cloud.videointelligence.v1.IEntity|null); + + /** LogoRecognitionAnnotation tracks */ + tracks?: (google.cloud.videointelligence.v1.ITrack[]|null); + + /** LogoRecognitionAnnotation segments */ + segments?: (google.cloud.videointelligence.v1.IVideoSegment[]|null); + } + + /** Represents a LogoRecognitionAnnotation. */ + class LogoRecognitionAnnotation implements ILogoRecognitionAnnotation { + + /** + * Constructs a new LogoRecognitionAnnotation. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.videointelligence.v1.ILogoRecognitionAnnotation); + + /** LogoRecognitionAnnotation entity. */ + public entity?: (google.cloud.videointelligence.v1.IEntity|null); + + /** LogoRecognitionAnnotation tracks. */ + public tracks: google.cloud.videointelligence.v1.ITrack[]; + + /** LogoRecognitionAnnotation segments. 
*/ + public segments: google.cloud.videointelligence.v1.IVideoSegment[]; + + /** + * Creates a new LogoRecognitionAnnotation instance using the specified properties. + * @param [properties] Properties to set + * @returns LogoRecognitionAnnotation instance + */ + public static create(properties?: google.cloud.videointelligence.v1.ILogoRecognitionAnnotation): google.cloud.videointelligence.v1.LogoRecognitionAnnotation; + + /** + * Encodes the specified LogoRecognitionAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1.LogoRecognitionAnnotation.verify|verify} messages. + * @param message LogoRecognitionAnnotation message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.videointelligence.v1.ILogoRecognitionAnnotation, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified LogoRecognitionAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.LogoRecognitionAnnotation.verify|verify} messages. + * @param message LogoRecognitionAnnotation message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.videointelligence.v1.ILogoRecognitionAnnotation, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a LogoRecognitionAnnotation message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns LogoRecognitionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.videointelligence.v1.LogoRecognitionAnnotation; + + /** + * Decodes a LogoRecognitionAnnotation message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns LogoRecognitionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.videointelligence.v1.LogoRecognitionAnnotation; + + /** + * Verifies a LogoRecognitionAnnotation message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a LogoRecognitionAnnotation message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns LogoRecognitionAnnotation + */ + public static fromObject(object: { [k: string]: any }): google.cloud.videointelligence.v1.LogoRecognitionAnnotation; + + /** + * Creates a plain object from a LogoRecognitionAnnotation message. Also converts values to other types if specified. + * @param message LogoRecognitionAnnotation + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.videointelligence.v1.LogoRecognitionAnnotation, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this LogoRecognitionAnnotation to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } } /** Namespace v1beta2. 
*/ diff --git a/packages/google-cloud-videointelligence/protos/protos.js b/packages/google-cloud-videointelligence/protos/protos.js index 64e727108e2..57fe563d5a2 100644 --- a/packages/google-cloud-videointelligence/protos/protos.js +++ b/packages/google-cloud-videointelligence/protos/protos.js @@ -367,6 +367,7 @@ case 6: case 7: case 9: + case 12: break; } } @@ -442,6 +443,10 @@ case 9: message.features[i] = 9; break; + case "LOGO_RECOGNITION": + case 12: + message.features[i] = 12; + break; } } if (object.videoContext != null) { @@ -927,6 +932,7 @@ * @property {number} SPEECH_TRANSCRIPTION=6 SPEECH_TRANSCRIPTION value * @property {number} TEXT_DETECTION=7 TEXT_DETECTION value * @property {number} OBJECT_TRACKING=9 OBJECT_TRACKING value + * @property {number} LOGO_RECOGNITION=12 LOGO_RECOGNITION value */ v1.Feature = (function() { var valuesById = {}, values = Object.create(valuesById); @@ -938,6 +944,7 @@ values[valuesById[6] = "SPEECH_TRANSCRIPTION"] = 6; values[valuesById[7] = "TEXT_DETECTION"] = 7; values[valuesById[9] = "OBJECT_TRACKING"] = 9; + values[valuesById[12] = "LOGO_RECOGNITION"] = 12; return values; })(); @@ -4904,47 +4911,29 @@ return FaceAnnotation; })(); - v1.VideoAnnotationResults = (function() { + v1.TimestampedObject = (function() { /** - * Properties of a VideoAnnotationResults. + * Properties of a TimestampedObject. * @memberof google.cloud.videointelligence.v1 - * @interface IVideoAnnotationResults - * @property {string|null} [inputUri] VideoAnnotationResults inputUri - * @property {google.cloud.videointelligence.v1.IVideoSegment|null} [segment] VideoAnnotationResults segment - * @property {Array.|null} [segmentLabelAnnotations] VideoAnnotationResults segmentLabelAnnotations - * @property {Array.|null} [segmentPresenceLabelAnnotations] VideoAnnotationResults segmentPresenceLabelAnnotations - * @property {Array.|null} [shotLabelAnnotations] VideoAnnotationResults shotLabelAnnotations - * @property {Array.|null} [shotPresenceLabelAnnotations] VideoAnnotationResults shotPresenceLabelAnnotations - * @property {Array.|null} [frameLabelAnnotations] VideoAnnotationResults frameLabelAnnotations - * @property {Array.|null} [faceAnnotations] VideoAnnotationResults faceAnnotations - * @property {Array.|null} [shotAnnotations] VideoAnnotationResults shotAnnotations - * @property {google.cloud.videointelligence.v1.IExplicitContentAnnotation|null} [explicitAnnotation] VideoAnnotationResults explicitAnnotation - * @property {Array.|null} [speechTranscriptions] VideoAnnotationResults speechTranscriptions - * @property {Array.|null} [textAnnotations] VideoAnnotationResults textAnnotations - * @property {Array.|null} [objectAnnotations] VideoAnnotationResults objectAnnotations - * @property {google.rpc.IStatus|null} [error] VideoAnnotationResults error + * @interface ITimestampedObject + * @property {google.cloud.videointelligence.v1.INormalizedBoundingBox|null} [normalizedBoundingBox] TimestampedObject normalizedBoundingBox + * @property {google.protobuf.IDuration|null} [timeOffset] TimestampedObject timeOffset + * @property {Array.|null} [attributes] TimestampedObject attributes + * @property {Array.|null} [landmarks] TimestampedObject landmarks */ /** - * Constructs a new VideoAnnotationResults. + * Constructs a new TimestampedObject. * @memberof google.cloud.videointelligence.v1 - * @classdesc Represents a VideoAnnotationResults. - * @implements IVideoAnnotationResults + * @classdesc Represents a TimestampedObject. 
+ * @implements ITimestampedObject * @constructor - * @param {google.cloud.videointelligence.v1.IVideoAnnotationResults=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1.ITimestampedObject=} [properties] Properties to set */ - function VideoAnnotationResults(properties) { - this.segmentLabelAnnotations = []; - this.segmentPresenceLabelAnnotations = []; - this.shotLabelAnnotations = []; - this.shotPresenceLabelAnnotations = []; - this.frameLabelAnnotations = []; - this.faceAnnotations = []; - this.shotAnnotations = []; - this.speechTranscriptions = []; - this.textAnnotations = []; - this.objectAnnotations = []; + function TimestampedObject(properties) { + this.attributes = []; + this.landmarks = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -4952,245 +4941,1356 @@ } /** - * VideoAnnotationResults inputUri. - * @member {string} inputUri - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults - * @instance - */ - VideoAnnotationResults.prototype.inputUri = ""; - - /** - * VideoAnnotationResults segment. - * @member {google.cloud.videointelligence.v1.IVideoSegment|null|undefined} segment - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults - * @instance - */ - VideoAnnotationResults.prototype.segment = null; - - /** - * VideoAnnotationResults segmentLabelAnnotations. - * @member {Array.} segmentLabelAnnotations - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults - * @instance - */ - VideoAnnotationResults.prototype.segmentLabelAnnotations = $util.emptyArray; - - /** - * VideoAnnotationResults segmentPresenceLabelAnnotations. - * @member {Array.} segmentPresenceLabelAnnotations - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults - * @instance - */ - VideoAnnotationResults.prototype.segmentPresenceLabelAnnotations = $util.emptyArray; - - /** - * VideoAnnotationResults shotLabelAnnotations. - * @member {Array.} shotLabelAnnotations - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults - * @instance - */ - VideoAnnotationResults.prototype.shotLabelAnnotations = $util.emptyArray; - - /** - * VideoAnnotationResults shotPresenceLabelAnnotations. - * @member {Array.} shotPresenceLabelAnnotations - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults - * @instance - */ - VideoAnnotationResults.prototype.shotPresenceLabelAnnotations = $util.emptyArray; - - /** - * VideoAnnotationResults frameLabelAnnotations. - * @member {Array.} frameLabelAnnotations - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults - * @instance - */ - VideoAnnotationResults.prototype.frameLabelAnnotations = $util.emptyArray; - - /** - * VideoAnnotationResults faceAnnotations. - * @member {Array.} faceAnnotations - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults - * @instance - */ - VideoAnnotationResults.prototype.faceAnnotations = $util.emptyArray; - - /** - * VideoAnnotationResults shotAnnotations. - * @member {Array.} shotAnnotations - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults - * @instance - */ - VideoAnnotationResults.prototype.shotAnnotations = $util.emptyArray; - - /** - * VideoAnnotationResults explicitAnnotation. 
- * @member {google.cloud.videointelligence.v1.IExplicitContentAnnotation|null|undefined} explicitAnnotation - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults - * @instance - */ - VideoAnnotationResults.prototype.explicitAnnotation = null; - - /** - * VideoAnnotationResults speechTranscriptions. - * @member {Array.} speechTranscriptions - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * TimestampedObject normalizedBoundingBox. + * @member {google.cloud.videointelligence.v1.INormalizedBoundingBox|null|undefined} normalizedBoundingBox + * @memberof google.cloud.videointelligence.v1.TimestampedObject * @instance */ - VideoAnnotationResults.prototype.speechTranscriptions = $util.emptyArray; + TimestampedObject.prototype.normalizedBoundingBox = null; /** - * VideoAnnotationResults textAnnotations. - * @member {Array.} textAnnotations - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * TimestampedObject timeOffset. + * @member {google.protobuf.IDuration|null|undefined} timeOffset + * @memberof google.cloud.videointelligence.v1.TimestampedObject * @instance */ - VideoAnnotationResults.prototype.textAnnotations = $util.emptyArray; + TimestampedObject.prototype.timeOffset = null; /** - * VideoAnnotationResults objectAnnotations. - * @member {Array.} objectAnnotations - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * TimestampedObject attributes. + * @member {Array.} attributes + * @memberof google.cloud.videointelligence.v1.TimestampedObject * @instance */ - VideoAnnotationResults.prototype.objectAnnotations = $util.emptyArray; + TimestampedObject.prototype.attributes = $util.emptyArray; /** - * VideoAnnotationResults error. - * @member {google.rpc.IStatus|null|undefined} error - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * TimestampedObject landmarks. + * @member {Array.} landmarks + * @memberof google.cloud.videointelligence.v1.TimestampedObject * @instance */ - VideoAnnotationResults.prototype.error = null; + TimestampedObject.prototype.landmarks = $util.emptyArray; /** - * Creates a new VideoAnnotationResults instance using the specified properties. + * Creates a new TimestampedObject instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @memberof google.cloud.videointelligence.v1.TimestampedObject * @static - * @param {google.cloud.videointelligence.v1.IVideoAnnotationResults=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1.VideoAnnotationResults} VideoAnnotationResults instance + * @param {google.cloud.videointelligence.v1.ITimestampedObject=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1.TimestampedObject} TimestampedObject instance */ - VideoAnnotationResults.create = function create(properties) { - return new VideoAnnotationResults(properties); + TimestampedObject.create = function create(properties) { + return new TimestampedObject(properties); }; /** - * Encodes the specified VideoAnnotationResults message. Does not implicitly {@link google.cloud.videointelligence.v1.VideoAnnotationResults.verify|verify} messages. + * Encodes the specified TimestampedObject message. Does not implicitly {@link google.cloud.videointelligence.v1.TimestampedObject.verify|verify} messages. 
* @function encode - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @memberof google.cloud.videointelligence.v1.TimestampedObject * @static - * @param {google.cloud.videointelligence.v1.IVideoAnnotationResults} message VideoAnnotationResults message or plain object to encode + * @param {google.cloud.videointelligence.v1.ITimestampedObject} message TimestampedObject message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VideoAnnotationResults.encode = function encode(message, writer) { + TimestampedObject.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.inputUri != null && message.hasOwnProperty("inputUri")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.inputUri); - if (message.segmentLabelAnnotations != null && message.segmentLabelAnnotations.length) - for (var i = 0; i < message.segmentLabelAnnotations.length; ++i) - $root.google.cloud.videointelligence.v1.LabelAnnotation.encode(message.segmentLabelAnnotations[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.shotLabelAnnotations != null && message.shotLabelAnnotations.length) - for (var i = 0; i < message.shotLabelAnnotations.length; ++i) - $root.google.cloud.videointelligence.v1.LabelAnnotation.encode(message.shotLabelAnnotations[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.frameLabelAnnotations != null && message.frameLabelAnnotations.length) - for (var i = 0; i < message.frameLabelAnnotations.length; ++i) - $root.google.cloud.videointelligence.v1.LabelAnnotation.encode(message.frameLabelAnnotations[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.faceAnnotations != null && message.faceAnnotations.length) - for (var i = 0; i < message.faceAnnotations.length; ++i) - $root.google.cloud.videointelligence.v1.FaceAnnotation.encode(message.faceAnnotations[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.shotAnnotations != null && message.shotAnnotations.length) - for (var i = 0; i < message.shotAnnotations.length; ++i) - $root.google.cloud.videointelligence.v1.VideoSegment.encode(message.shotAnnotations[i], writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.explicitAnnotation != null && message.hasOwnProperty("explicitAnnotation")) - $root.google.cloud.videointelligence.v1.ExplicitContentAnnotation.encode(message.explicitAnnotation, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); - if (message.error != null && message.hasOwnProperty("error")) - $root.google.rpc.Status.encode(message.error, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); - if (message.segment != null && message.hasOwnProperty("segment")) - $root.google.cloud.videointelligence.v1.VideoSegment.encode(message.segment, writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); - if (message.speechTranscriptions != null && message.speechTranscriptions.length) - for (var i = 0; i < message.speechTranscriptions.length; ++i) - $root.google.cloud.videointelligence.v1.SpeechTranscription.encode(message.speechTranscriptions[i], writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); - if (message.textAnnotations != null && message.textAnnotations.length) - for (var i = 0; i < message.textAnnotations.length; ++i) - $root.google.cloud.videointelligence.v1.TextAnnotation.encode(message.textAnnotations[i], writer.uint32(/* id 12, wireType 2 =*/98).fork()).ldelim(); - 
if (message.objectAnnotations != null && message.objectAnnotations.length) - for (var i = 0; i < message.objectAnnotations.length; ++i) - $root.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.encode(message.objectAnnotations[i], writer.uint32(/* id 14, wireType 2 =*/114).fork()).ldelim(); - if (message.segmentPresenceLabelAnnotations != null && message.segmentPresenceLabelAnnotations.length) - for (var i = 0; i < message.segmentPresenceLabelAnnotations.length; ++i) - $root.google.cloud.videointelligence.v1.LabelAnnotation.encode(message.segmentPresenceLabelAnnotations[i], writer.uint32(/* id 23, wireType 2 =*/186).fork()).ldelim(); - if (message.shotPresenceLabelAnnotations != null && message.shotPresenceLabelAnnotations.length) - for (var i = 0; i < message.shotPresenceLabelAnnotations.length; ++i) - $root.google.cloud.videointelligence.v1.LabelAnnotation.encode(message.shotPresenceLabelAnnotations[i], writer.uint32(/* id 24, wireType 2 =*/194).fork()).ldelim(); + if (message.normalizedBoundingBox != null && message.hasOwnProperty("normalizedBoundingBox")) + $root.google.cloud.videointelligence.v1.NormalizedBoundingBox.encode(message.normalizedBoundingBox, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) + $root.google.protobuf.Duration.encode(message.timeOffset, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.attributes != null && message.attributes.length) + for (var i = 0; i < message.attributes.length; ++i) + $root.google.cloud.videointelligence.v1.DetectedAttribute.encode(message.attributes[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.landmarks != null && message.landmarks.length) + for (var i = 0; i < message.landmarks.length; ++i) + $root.google.cloud.videointelligence.v1.DetectedLandmark.encode(message.landmarks[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified VideoAnnotationResults message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.VideoAnnotationResults.verify|verify} messages. + * Encodes the specified TimestampedObject message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.TimestampedObject.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @memberof google.cloud.videointelligence.v1.TimestampedObject * @static - * @param {google.cloud.videointelligence.v1.IVideoAnnotationResults} message VideoAnnotationResults message or plain object to encode + * @param {google.cloud.videointelligence.v1.ITimestampedObject} message TimestampedObject message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VideoAnnotationResults.encodeDelimited = function encodeDelimited(message, writer) { + TimestampedObject.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VideoAnnotationResults message from the specified reader or buffer. + * Decodes a TimestampedObject message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @memberof google.cloud.videointelligence.v1.TimestampedObject * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1.VideoAnnotationResults} VideoAnnotationResults + * @returns {google.cloud.videointelligence.v1.TimestampedObject} TimestampedObject * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VideoAnnotationResults.decode = function decode(reader, length) { + TimestampedObject.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1.VideoAnnotationResults(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1.TimestampedObject(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.inputUri = reader.string(); - break; - case 10: - message.segment = $root.google.cloud.videointelligence.v1.VideoSegment.decode(reader, reader.uint32()); + message.normalizedBoundingBox = $root.google.cloud.videointelligence.v1.NormalizedBoundingBox.decode(reader, reader.uint32()); break; case 2: - if (!(message.segmentLabelAnnotations && message.segmentLabelAnnotations.length)) - message.segmentLabelAnnotations = []; - message.segmentLabelAnnotations.push($root.google.cloud.videointelligence.v1.LabelAnnotation.decode(reader, reader.uint32())); - break; - case 23: - if (!(message.segmentPresenceLabelAnnotations && message.segmentPresenceLabelAnnotations.length)) - message.segmentPresenceLabelAnnotations = []; - message.segmentPresenceLabelAnnotations.push($root.google.cloud.videointelligence.v1.LabelAnnotation.decode(reader, reader.uint32())); + message.timeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); break; case 3: - if (!(message.shotLabelAnnotations && message.shotLabelAnnotations.length)) - message.shotLabelAnnotations = []; - message.shotLabelAnnotations.push($root.google.cloud.videointelligence.v1.LabelAnnotation.decode(reader, reader.uint32())); - break; - case 24: - if (!(message.shotPresenceLabelAnnotations && message.shotPresenceLabelAnnotations.length)) - message.shotPresenceLabelAnnotations = []; - message.shotPresenceLabelAnnotations.push($root.google.cloud.videointelligence.v1.LabelAnnotation.decode(reader, reader.uint32())); + if (!(message.attributes && message.attributes.length)) + message.attributes = []; + message.attributes.push($root.google.cloud.videointelligence.v1.DetectedAttribute.decode(reader, reader.uint32())); break; case 4: - if (!(message.frameLabelAnnotations && message.frameLabelAnnotations.length)) - message.frameLabelAnnotations = []; - message.frameLabelAnnotations.push($root.google.cloud.videointelligence.v1.LabelAnnotation.decode(reader, reader.uint32())); + if (!(message.landmarks && message.landmarks.length)) + message.landmarks = []; + message.landmarks.push($root.google.cloud.videointelligence.v1.DetectedLandmark.decode(reader, reader.uint32())); break; - case 5: + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a TimestampedObject message from the specified reader or buffer, length 
delimited. + * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1.TimestampedObject + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1.TimestampedObject} TimestampedObject + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TimestampedObject.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a TimestampedObject message. + * @function verify + * @memberof google.cloud.videointelligence.v1.TimestampedObject + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + TimestampedObject.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.normalizedBoundingBox != null && message.hasOwnProperty("normalizedBoundingBox")) { + var error = $root.google.cloud.videointelligence.v1.NormalizedBoundingBox.verify(message.normalizedBoundingBox); + if (error) + return "normalizedBoundingBox." + error; + } + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) { + var error = $root.google.protobuf.Duration.verify(message.timeOffset); + if (error) + return "timeOffset." + error; + } + if (message.attributes != null && message.hasOwnProperty("attributes")) { + if (!Array.isArray(message.attributes)) + return "attributes: array expected"; + for (var i = 0; i < message.attributes.length; ++i) { + var error = $root.google.cloud.videointelligence.v1.DetectedAttribute.verify(message.attributes[i]); + if (error) + return "attributes." + error; + } + } + if (message.landmarks != null && message.hasOwnProperty("landmarks")) { + if (!Array.isArray(message.landmarks)) + return "landmarks: array expected"; + for (var i = 0; i < message.landmarks.length; ++i) { + var error = $root.google.cloud.videointelligence.v1.DetectedLandmark.verify(message.landmarks[i]); + if (error) + return "landmarks." + error; + } + } + return null; + }; + + /** + * Creates a TimestampedObject message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.videointelligence.v1.TimestampedObject + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1.TimestampedObject} TimestampedObject + */ + TimestampedObject.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1.TimestampedObject) + return object; + var message = new $root.google.cloud.videointelligence.v1.TimestampedObject(); + if (object.normalizedBoundingBox != null) { + if (typeof object.normalizedBoundingBox !== "object") + throw TypeError(".google.cloud.videointelligence.v1.TimestampedObject.normalizedBoundingBox: object expected"); + message.normalizedBoundingBox = $root.google.cloud.videointelligence.v1.NormalizedBoundingBox.fromObject(object.normalizedBoundingBox); + } + if (object.timeOffset != null) { + if (typeof object.timeOffset !== "object") + throw TypeError(".google.cloud.videointelligence.v1.TimestampedObject.timeOffset: object expected"); + message.timeOffset = $root.google.protobuf.Duration.fromObject(object.timeOffset); + } + if (object.attributes) { + if (!Array.isArray(object.attributes)) + throw TypeError(".google.cloud.videointelligence.v1.TimestampedObject.attributes: array expected"); + message.attributes = []; + for (var i = 0; i < object.attributes.length; ++i) { + if (typeof object.attributes[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1.TimestampedObject.attributes: object expected"); + message.attributes[i] = $root.google.cloud.videointelligence.v1.DetectedAttribute.fromObject(object.attributes[i]); + } + } + if (object.landmarks) { + if (!Array.isArray(object.landmarks)) + throw TypeError(".google.cloud.videointelligence.v1.TimestampedObject.landmarks: array expected"); + message.landmarks = []; + for (var i = 0; i < object.landmarks.length; ++i) { + if (typeof object.landmarks[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1.TimestampedObject.landmarks: object expected"); + message.landmarks[i] = $root.google.cloud.videointelligence.v1.DetectedLandmark.fromObject(object.landmarks[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a TimestampedObject message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.videointelligence.v1.TimestampedObject + * @static + * @param {google.cloud.videointelligence.v1.TimestampedObject} message TimestampedObject + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + TimestampedObject.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.attributes = []; + object.landmarks = []; + } + if (options.defaults) { + object.normalizedBoundingBox = null; + object.timeOffset = null; + } + if (message.normalizedBoundingBox != null && message.hasOwnProperty("normalizedBoundingBox")) + object.normalizedBoundingBox = $root.google.cloud.videointelligence.v1.NormalizedBoundingBox.toObject(message.normalizedBoundingBox, options); + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) + object.timeOffset = $root.google.protobuf.Duration.toObject(message.timeOffset, options); + if (message.attributes && message.attributes.length) { + object.attributes = []; + for (var j = 0; j < message.attributes.length; ++j) + object.attributes[j] = $root.google.cloud.videointelligence.v1.DetectedAttribute.toObject(message.attributes[j], options); + } + if (message.landmarks && message.landmarks.length) { + object.landmarks = []; + for (var j = 0; j < message.landmarks.length; ++j) + object.landmarks[j] = $root.google.cloud.videointelligence.v1.DetectedLandmark.toObject(message.landmarks[j], options); + } + return object; + }; + + /** + * Converts this TimestampedObject to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1.TimestampedObject + * @instance + * @returns {Object.} JSON object + */ + TimestampedObject.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + return TimestampedObject; + })(); + + v1.Track = (function() { + + /** + * Properties of a Track. + * @memberof google.cloud.videointelligence.v1 + * @interface ITrack + * @property {google.cloud.videointelligence.v1.IVideoSegment|null} [segment] Track segment + * @property {Array.|null} [timestampedObjects] Track timestampedObjects + * @property {Array.|null} [attributes] Track attributes + * @property {number|null} [confidence] Track confidence + */ + + /** + * Constructs a new Track. + * @memberof google.cloud.videointelligence.v1 + * @classdesc Represents a Track. + * @implements ITrack + * @constructor + * @param {google.cloud.videointelligence.v1.ITrack=} [properties] Properties to set + */ + function Track(properties) { + this.timestampedObjects = []; + this.attributes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Track segment. + * @member {google.cloud.videointelligence.v1.IVideoSegment|null|undefined} segment + * @memberof google.cloud.videointelligence.v1.Track + * @instance + */ + Track.prototype.segment = null; + + /** + * Track timestampedObjects. + * @member {Array.} timestampedObjects + * @memberof google.cloud.videointelligence.v1.Track + * @instance + */ + Track.prototype.timestampedObjects = $util.emptyArray; + + /** + * Track attributes. + * @member {Array.} attributes + * @memberof google.cloud.videointelligence.v1.Track + * @instance + */ + Track.prototype.attributes = $util.emptyArray; + + /** + * Track confidence. 
+ * @member {number} confidence + * @memberof google.cloud.videointelligence.v1.Track + * @instance + */ + Track.prototype.confidence = 0; + + /** + * Creates a new Track instance using the specified properties. + * @function create + * @memberof google.cloud.videointelligence.v1.Track + * @static + * @param {google.cloud.videointelligence.v1.ITrack=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1.Track} Track instance + */ + Track.create = function create(properties) { + return new Track(properties); + }; + + /** + * Encodes the specified Track message. Does not implicitly {@link google.cloud.videointelligence.v1.Track.verify|verify} messages. + * @function encode + * @memberof google.cloud.videointelligence.v1.Track + * @static + * @param {google.cloud.videointelligence.v1.ITrack} message Track message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Track.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.segment != null && message.hasOwnProperty("segment")) + $root.google.cloud.videointelligence.v1.VideoSegment.encode(message.segment, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.timestampedObjects != null && message.timestampedObjects.length) + for (var i = 0; i < message.timestampedObjects.length; ++i) + $root.google.cloud.videointelligence.v1.TimestampedObject.encode(message.timestampedObjects[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.attributes != null && message.attributes.length) + for (var i = 0; i < message.attributes.length; ++i) + $root.google.cloud.videointelligence.v1.DetectedAttribute.encode(message.attributes[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.confidence != null && message.hasOwnProperty("confidence")) + writer.uint32(/* id 4, wireType 5 =*/37).float(message.confidence); + return writer; + }; + + /** + * Encodes the specified Track message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.Track.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1.Track + * @static + * @param {google.cloud.videointelligence.v1.ITrack} message Track message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Track.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Track message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.videointelligence.v1.Track + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1.Track} Track + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Track.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1.Track(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.segment = $root.google.cloud.videointelligence.v1.VideoSegment.decode(reader, reader.uint32()); + break; + case 2: + if (!(message.timestampedObjects && message.timestampedObjects.length)) + message.timestampedObjects = []; + message.timestampedObjects.push($root.google.cloud.videointelligence.v1.TimestampedObject.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.attributes && message.attributes.length)) + message.attributes = []; + message.attributes.push($root.google.cloud.videointelligence.v1.DetectedAttribute.decode(reader, reader.uint32())); + break; + case 4: + message.confidence = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Track message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1.Track + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1.Track} Track + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Track.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Track message. + * @function verify + * @memberof google.cloud.videointelligence.v1.Track + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Track.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.segment != null && message.hasOwnProperty("segment")) { + var error = $root.google.cloud.videointelligence.v1.VideoSegment.verify(message.segment); + if (error) + return "segment." + error; + } + if (message.timestampedObjects != null && message.hasOwnProperty("timestampedObjects")) { + if (!Array.isArray(message.timestampedObjects)) + return "timestampedObjects: array expected"; + for (var i = 0; i < message.timestampedObjects.length; ++i) { + var error = $root.google.cloud.videointelligence.v1.TimestampedObject.verify(message.timestampedObjects[i]); + if (error) + return "timestampedObjects." + error; + } + } + if (message.attributes != null && message.hasOwnProperty("attributes")) { + if (!Array.isArray(message.attributes)) + return "attributes: array expected"; + for (var i = 0; i < message.attributes.length; ++i) { + var error = $root.google.cloud.videointelligence.v1.DetectedAttribute.verify(message.attributes[i]); + if (error) + return "attributes." + error; + } + } + if (message.confidence != null && message.hasOwnProperty("confidence")) + if (typeof message.confidence !== "number") + return "confidence: number expected"; + return null; + }; + + /** + * Creates a Track message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.videointelligence.v1.Track + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1.Track} Track + */ + Track.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1.Track) + return object; + var message = new $root.google.cloud.videointelligence.v1.Track(); + if (object.segment != null) { + if (typeof object.segment !== "object") + throw TypeError(".google.cloud.videointelligence.v1.Track.segment: object expected"); + message.segment = $root.google.cloud.videointelligence.v1.VideoSegment.fromObject(object.segment); + } + if (object.timestampedObjects) { + if (!Array.isArray(object.timestampedObjects)) + throw TypeError(".google.cloud.videointelligence.v1.Track.timestampedObjects: array expected"); + message.timestampedObjects = []; + for (var i = 0; i < object.timestampedObjects.length; ++i) { + if (typeof object.timestampedObjects[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1.Track.timestampedObjects: object expected"); + message.timestampedObjects[i] = $root.google.cloud.videointelligence.v1.TimestampedObject.fromObject(object.timestampedObjects[i]); + } + } + if (object.attributes) { + if (!Array.isArray(object.attributes)) + throw TypeError(".google.cloud.videointelligence.v1.Track.attributes: array expected"); + message.attributes = []; + for (var i = 0; i < object.attributes.length; ++i) { + if (typeof object.attributes[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1.Track.attributes: object expected"); + message.attributes[i] = $root.google.cloud.videointelligence.v1.DetectedAttribute.fromObject(object.attributes[i]); + } + } + if (object.confidence != null) + message.confidence = Number(object.confidence); + return message; + }; + + /** + * Creates a plain object from a Track message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.videointelligence.v1.Track + * @static + * @param {google.cloud.videointelligence.v1.Track} message Track + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Track.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.timestampedObjects = []; + object.attributes = []; + } + if (options.defaults) { + object.segment = null; + object.confidence = 0; + } + if (message.segment != null && message.hasOwnProperty("segment")) + object.segment = $root.google.cloud.videointelligence.v1.VideoSegment.toObject(message.segment, options); + if (message.timestampedObjects && message.timestampedObjects.length) { + object.timestampedObjects = []; + for (var j = 0; j < message.timestampedObjects.length; ++j) + object.timestampedObjects[j] = $root.google.cloud.videointelligence.v1.TimestampedObject.toObject(message.timestampedObjects[j], options); + } + if (message.attributes && message.attributes.length) { + object.attributes = []; + for (var j = 0; j < message.attributes.length; ++j) + object.attributes[j] = $root.google.cloud.videointelligence.v1.DetectedAttribute.toObject(message.attributes[j], options); + } + if (message.confidence != null && message.hasOwnProperty("confidence")) + object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; + return object; + }; + + /** + * Converts this Track to JSON. 
+ * @function toJSON + * @memberof google.cloud.videointelligence.v1.Track + * @instance + * @returns {Object.} JSON object + */ + Track.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + return Track; + })(); + + v1.DetectedAttribute = (function() { + + /** + * Properties of a DetectedAttribute. + * @memberof google.cloud.videointelligence.v1 + * @interface IDetectedAttribute + * @property {string|null} [name] DetectedAttribute name + * @property {number|null} [confidence] DetectedAttribute confidence + * @property {string|null} [value] DetectedAttribute value + */ + + /** + * Constructs a new DetectedAttribute. + * @memberof google.cloud.videointelligence.v1 + * @classdesc Represents a DetectedAttribute. + * @implements IDetectedAttribute + * @constructor + * @param {google.cloud.videointelligence.v1.IDetectedAttribute=} [properties] Properties to set + */ + function DetectedAttribute(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * DetectedAttribute name. + * @member {string} name + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @instance + */ + DetectedAttribute.prototype.name = ""; + + /** + * DetectedAttribute confidence. + * @member {number} confidence + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @instance + */ + DetectedAttribute.prototype.confidence = 0; + + /** + * DetectedAttribute value. + * @member {string} value + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @instance + */ + DetectedAttribute.prototype.value = ""; + + /** + * Creates a new DetectedAttribute instance using the specified properties. + * @function create + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @static + * @param {google.cloud.videointelligence.v1.IDetectedAttribute=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1.DetectedAttribute} DetectedAttribute instance + */ + DetectedAttribute.create = function create(properties) { + return new DetectedAttribute(properties); + }; + + /** + * Encodes the specified DetectedAttribute message. Does not implicitly {@link google.cloud.videointelligence.v1.DetectedAttribute.verify|verify} messages. + * @function encode + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @static + * @param {google.cloud.videointelligence.v1.IDetectedAttribute} message DetectedAttribute message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DetectedAttribute.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && message.hasOwnProperty("name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.confidence != null && message.hasOwnProperty("confidence")) + writer.uint32(/* id 2, wireType 5 =*/21).float(message.confidence); + if (message.value != null && message.hasOwnProperty("value")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.value); + return writer; + }; + + /** + * Encodes the specified DetectedAttribute message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.DetectedAttribute.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @static + * @param {google.cloud.videointelligence.v1.IDetectedAttribute} message DetectedAttribute message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DetectedAttribute.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DetectedAttribute message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1.DetectedAttribute} DetectedAttribute + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DetectedAttribute.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1.DetectedAttribute(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.confidence = reader.float(); + break; + case 3: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DetectedAttribute message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1.DetectedAttribute} DetectedAttribute + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DetectedAttribute.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DetectedAttribute message. + * @function verify + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DetectedAttribute.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.confidence != null && message.hasOwnProperty("confidence")) + if (typeof message.confidence !== "number") + return "confidence: number expected"; + if (message.value != null && message.hasOwnProperty("value")) + if (!$util.isString(message.value)) + return "value: string expected"; + return null; + }; + + /** + * Creates a DetectedAttribute message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1.DetectedAttribute} DetectedAttribute + */ + DetectedAttribute.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1.DetectedAttribute) + return object; + var message = new $root.google.cloud.videointelligence.v1.DetectedAttribute(); + if (object.name != null) + message.name = String(object.name); + if (object.confidence != null) + message.confidence = Number(object.confidence); + if (object.value != null) + message.value = String(object.value); + return message; + }; + + /** + * Creates a plain object from a DetectedAttribute message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @static + * @param {google.cloud.videointelligence.v1.DetectedAttribute} message DetectedAttribute + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DetectedAttribute.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.name = ""; + object.confidence = 0; + object.value = ""; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.confidence != null && message.hasOwnProperty("confidence")) + object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; + if (message.value != null && message.hasOwnProperty("value")) + object.value = message.value; + return object; + }; + + /** + * Converts this DetectedAttribute to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1.DetectedAttribute + * @instance + * @returns {Object.} JSON object + */ + DetectedAttribute.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + return DetectedAttribute; + })(); + + v1.DetectedLandmark = (function() { + + /** + * Properties of a DetectedLandmark. + * @memberof google.cloud.videointelligence.v1 + * @interface IDetectedLandmark + * @property {string|null} [name] DetectedLandmark name + * @property {google.cloud.videointelligence.v1.INormalizedVertex|null} [point] DetectedLandmark point + * @property {number|null} [confidence] DetectedLandmark confidence + */ + + /** + * Constructs a new DetectedLandmark. + * @memberof google.cloud.videointelligence.v1 + * @classdesc Represents a DetectedLandmark. + * @implements IDetectedLandmark + * @constructor + * @param {google.cloud.videointelligence.v1.IDetectedLandmark=} [properties] Properties to set + */ + function DetectedLandmark(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * DetectedLandmark name. + * @member {string} name + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @instance + */ + DetectedLandmark.prototype.name = ""; + + /** + * DetectedLandmark point. + * @member {google.cloud.videointelligence.v1.INormalizedVertex|null|undefined} point + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @instance + */ + DetectedLandmark.prototype.point = null; + + /** + * DetectedLandmark confidence. 
+ * @member {number} confidence + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @instance + */ + DetectedLandmark.prototype.confidence = 0; + + /** + * Creates a new DetectedLandmark instance using the specified properties. + * @function create + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @static + * @param {google.cloud.videointelligence.v1.IDetectedLandmark=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1.DetectedLandmark} DetectedLandmark instance + */ + DetectedLandmark.create = function create(properties) { + return new DetectedLandmark(properties); + }; + + /** + * Encodes the specified DetectedLandmark message. Does not implicitly {@link google.cloud.videointelligence.v1.DetectedLandmark.verify|verify} messages. + * @function encode + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @static + * @param {google.cloud.videointelligence.v1.IDetectedLandmark} message DetectedLandmark message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DetectedLandmark.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && message.hasOwnProperty("name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.point != null && message.hasOwnProperty("point")) + $root.google.cloud.videointelligence.v1.NormalizedVertex.encode(message.point, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.confidence != null && message.hasOwnProperty("confidence")) + writer.uint32(/* id 3, wireType 5 =*/29).float(message.confidence); + return writer; + }; + + /** + * Encodes the specified DetectedLandmark message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.DetectedLandmark.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @static + * @param {google.cloud.videointelligence.v1.IDetectedLandmark} message DetectedLandmark message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DetectedLandmark.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DetectedLandmark message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1.DetectedLandmark} DetectedLandmark + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DetectedLandmark.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1.DetectedLandmark(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.point = $root.google.cloud.videointelligence.v1.NormalizedVertex.decode(reader, reader.uint32()); + break; + case 3: + message.confidence = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DetectedLandmark message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1.DetectedLandmark} DetectedLandmark + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DetectedLandmark.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DetectedLandmark message. + * @function verify + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DetectedLandmark.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.point != null && message.hasOwnProperty("point")) { + var error = $root.google.cloud.videointelligence.v1.NormalizedVertex.verify(message.point); + if (error) + return "point." + error; + } + if (message.confidence != null && message.hasOwnProperty("confidence")) + if (typeof message.confidence !== "number") + return "confidence: number expected"; + return null; + }; + + /** + * Creates a DetectedLandmark message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1.DetectedLandmark} DetectedLandmark + */ + DetectedLandmark.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1.DetectedLandmark) + return object; + var message = new $root.google.cloud.videointelligence.v1.DetectedLandmark(); + if (object.name != null) + message.name = String(object.name); + if (object.point != null) { + if (typeof object.point !== "object") + throw TypeError(".google.cloud.videointelligence.v1.DetectedLandmark.point: object expected"); + message.point = $root.google.cloud.videointelligence.v1.NormalizedVertex.fromObject(object.point); + } + if (object.confidence != null) + message.confidence = Number(object.confidence); + return message; + }; + + /** + * Creates a plain object from a DetectedLandmark message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @static + * @param {google.cloud.videointelligence.v1.DetectedLandmark} message DetectedLandmark + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DetectedLandmark.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.name = ""; + object.point = null; + object.confidence = 0; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.point != null && message.hasOwnProperty("point")) + object.point = $root.google.cloud.videointelligence.v1.NormalizedVertex.toObject(message.point, options); + if (message.confidence != null && message.hasOwnProperty("confidence")) + object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; + return object; + }; + + /** + * Converts this DetectedLandmark to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1.DetectedLandmark + * @instance + * @returns {Object.} JSON object + */ + DetectedLandmark.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + return DetectedLandmark; + })(); + + v1.VideoAnnotationResults = (function() { + + /** + * Properties of a VideoAnnotationResults. + * @memberof google.cloud.videointelligence.v1 + * @interface IVideoAnnotationResults + * @property {string|null} [inputUri] VideoAnnotationResults inputUri + * @property {google.cloud.videointelligence.v1.IVideoSegment|null} [segment] VideoAnnotationResults segment + * @property {Array.|null} [segmentLabelAnnotations] VideoAnnotationResults segmentLabelAnnotations + * @property {Array.|null} [segmentPresenceLabelAnnotations] VideoAnnotationResults segmentPresenceLabelAnnotations + * @property {Array.|null} [shotLabelAnnotations] VideoAnnotationResults shotLabelAnnotations + * @property {Array.|null} [shotPresenceLabelAnnotations] VideoAnnotationResults shotPresenceLabelAnnotations + * @property {Array.|null} [frameLabelAnnotations] VideoAnnotationResults frameLabelAnnotations + * @property {Array.|null} [faceAnnotations] VideoAnnotationResults faceAnnotations + * @property {Array.|null} [shotAnnotations] VideoAnnotationResults shotAnnotations + * @property {google.cloud.videointelligence.v1.IExplicitContentAnnotation|null} [explicitAnnotation] VideoAnnotationResults explicitAnnotation + * @property {Array.|null} [speechTranscriptions] VideoAnnotationResults speechTranscriptions + * @property {Array.|null} [textAnnotations] VideoAnnotationResults textAnnotations + * @property {Array.|null} [objectAnnotations] VideoAnnotationResults objectAnnotations + * @property {Array.|null} [logoRecognitionAnnotations] VideoAnnotationResults logoRecognitionAnnotations + * @property {google.rpc.IStatus|null} [error] VideoAnnotationResults error + */ + + /** + * Constructs a new VideoAnnotationResults. + * @memberof google.cloud.videointelligence.v1 + * @classdesc Represents a VideoAnnotationResults. 
+ * @implements IVideoAnnotationResults + * @constructor + * @param {google.cloud.videointelligence.v1.IVideoAnnotationResults=} [properties] Properties to set + */ + function VideoAnnotationResults(properties) { + this.segmentLabelAnnotations = []; + this.segmentPresenceLabelAnnotations = []; + this.shotLabelAnnotations = []; + this.shotPresenceLabelAnnotations = []; + this.frameLabelAnnotations = []; + this.faceAnnotations = []; + this.shotAnnotations = []; + this.speechTranscriptions = []; + this.textAnnotations = []; + this.objectAnnotations = []; + this.logoRecognitionAnnotations = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * VideoAnnotationResults inputUri. + * @member {string} inputUri + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.inputUri = ""; + + /** + * VideoAnnotationResults segment. + * @member {google.cloud.videointelligence.v1.IVideoSegment|null|undefined} segment + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.segment = null; + + /** + * VideoAnnotationResults segmentLabelAnnotations. + * @member {Array.} segmentLabelAnnotations + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.segmentLabelAnnotations = $util.emptyArray; + + /** + * VideoAnnotationResults segmentPresenceLabelAnnotations. + * @member {Array.} segmentPresenceLabelAnnotations + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.segmentPresenceLabelAnnotations = $util.emptyArray; + + /** + * VideoAnnotationResults shotLabelAnnotations. + * @member {Array.} shotLabelAnnotations + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.shotLabelAnnotations = $util.emptyArray; + + /** + * VideoAnnotationResults shotPresenceLabelAnnotations. + * @member {Array.} shotPresenceLabelAnnotations + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.shotPresenceLabelAnnotations = $util.emptyArray; + + /** + * VideoAnnotationResults frameLabelAnnotations. + * @member {Array.} frameLabelAnnotations + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.frameLabelAnnotations = $util.emptyArray; + + /** + * VideoAnnotationResults faceAnnotations. + * @member {Array.} faceAnnotations + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.faceAnnotations = $util.emptyArray; + + /** + * VideoAnnotationResults shotAnnotations. + * @member {Array.} shotAnnotations + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.shotAnnotations = $util.emptyArray; + + /** + * VideoAnnotationResults explicitAnnotation. + * @member {google.cloud.videointelligence.v1.IExplicitContentAnnotation|null|undefined} explicitAnnotation + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.explicitAnnotation = null; + + /** + * VideoAnnotationResults speechTranscriptions. 
+ * @member {Array.} speechTranscriptions + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.speechTranscriptions = $util.emptyArray; + + /** + * VideoAnnotationResults textAnnotations. + * @member {Array.} textAnnotations + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.textAnnotations = $util.emptyArray; + + /** + * VideoAnnotationResults objectAnnotations. + * @member {Array.} objectAnnotations + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.objectAnnotations = $util.emptyArray; + + /** + * VideoAnnotationResults logoRecognitionAnnotations. + * @member {Array.} logoRecognitionAnnotations + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.logoRecognitionAnnotations = $util.emptyArray; + + /** + * VideoAnnotationResults error. + * @member {google.rpc.IStatus|null|undefined} error + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.error = null; + + /** + * Creates a new VideoAnnotationResults instance using the specified properties. + * @function create + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @static + * @param {google.cloud.videointelligence.v1.IVideoAnnotationResults=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1.VideoAnnotationResults} VideoAnnotationResults instance + */ + VideoAnnotationResults.create = function create(properties) { + return new VideoAnnotationResults(properties); + }; + + /** + * Encodes the specified VideoAnnotationResults message. Does not implicitly {@link google.cloud.videointelligence.v1.VideoAnnotationResults.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @static + * @param {google.cloud.videointelligence.v1.IVideoAnnotationResults} message VideoAnnotationResults message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + VideoAnnotationResults.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.inputUri != null && message.hasOwnProperty("inputUri")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.inputUri); + if (message.segmentLabelAnnotations != null && message.segmentLabelAnnotations.length) + for (var i = 0; i < message.segmentLabelAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1.LabelAnnotation.encode(message.segmentLabelAnnotations[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.shotLabelAnnotations != null && message.shotLabelAnnotations.length) + for (var i = 0; i < message.shotLabelAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1.LabelAnnotation.encode(message.shotLabelAnnotations[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.frameLabelAnnotations != null && message.frameLabelAnnotations.length) + for (var i = 0; i < message.frameLabelAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1.LabelAnnotation.encode(message.frameLabelAnnotations[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.faceAnnotations != null && message.faceAnnotations.length) + for (var i = 0; i < message.faceAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1.FaceAnnotation.encode(message.faceAnnotations[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.shotAnnotations != null && message.shotAnnotations.length) + for (var i = 0; i < message.shotAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1.VideoSegment.encode(message.shotAnnotations[i], writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.explicitAnnotation != null && message.hasOwnProperty("explicitAnnotation")) + $root.google.cloud.videointelligence.v1.ExplicitContentAnnotation.encode(message.explicitAnnotation, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); + if (message.error != null && message.hasOwnProperty("error")) + $root.google.rpc.Status.encode(message.error, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); + if (message.segment != null && message.hasOwnProperty("segment")) + $root.google.cloud.videointelligence.v1.VideoSegment.encode(message.segment, writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); + if (message.speechTranscriptions != null && message.speechTranscriptions.length) + for (var i = 0; i < message.speechTranscriptions.length; ++i) + $root.google.cloud.videointelligence.v1.SpeechTranscription.encode(message.speechTranscriptions[i], writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); + if (message.textAnnotations != null && message.textAnnotations.length) + for (var i = 0; i < message.textAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1.TextAnnotation.encode(message.textAnnotations[i], writer.uint32(/* id 12, wireType 2 =*/98).fork()).ldelim(); + if (message.objectAnnotations != null && message.objectAnnotations.length) + for (var i = 0; i < message.objectAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.encode(message.objectAnnotations[i], 
writer.uint32(/* id 14, wireType 2 =*/114).fork()).ldelim(); + if (message.logoRecognitionAnnotations != null && message.logoRecognitionAnnotations.length) + for (var i = 0; i < message.logoRecognitionAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.encode(message.logoRecognitionAnnotations[i], writer.uint32(/* id 19, wireType 2 =*/154).fork()).ldelim(); + if (message.segmentPresenceLabelAnnotations != null && message.segmentPresenceLabelAnnotations.length) + for (var i = 0; i < message.segmentPresenceLabelAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1.LabelAnnotation.encode(message.segmentPresenceLabelAnnotations[i], writer.uint32(/* id 23, wireType 2 =*/186).fork()).ldelim(); + if (message.shotPresenceLabelAnnotations != null && message.shotPresenceLabelAnnotations.length) + for (var i = 0; i < message.shotPresenceLabelAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1.LabelAnnotation.encode(message.shotPresenceLabelAnnotations[i], writer.uint32(/* id 24, wireType 2 =*/194).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified VideoAnnotationResults message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.VideoAnnotationResults.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @static + * @param {google.cloud.videointelligence.v1.IVideoAnnotationResults} message VideoAnnotationResults message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + VideoAnnotationResults.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a VideoAnnotationResults message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.videointelligence.v1.VideoAnnotationResults + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1.VideoAnnotationResults} VideoAnnotationResults + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + VideoAnnotationResults.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1.VideoAnnotationResults(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputUri = reader.string(); + break; + case 10: + message.segment = $root.google.cloud.videointelligence.v1.VideoSegment.decode(reader, reader.uint32()); + break; + case 2: + if (!(message.segmentLabelAnnotations && message.segmentLabelAnnotations.length)) + message.segmentLabelAnnotations = []; + message.segmentLabelAnnotations.push($root.google.cloud.videointelligence.v1.LabelAnnotation.decode(reader, reader.uint32())); + break; + case 23: + if (!(message.segmentPresenceLabelAnnotations && message.segmentPresenceLabelAnnotations.length)) + message.segmentPresenceLabelAnnotations = []; + message.segmentPresenceLabelAnnotations.push($root.google.cloud.videointelligence.v1.LabelAnnotation.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.shotLabelAnnotations && message.shotLabelAnnotations.length)) + message.shotLabelAnnotations = []; + message.shotLabelAnnotations.push($root.google.cloud.videointelligence.v1.LabelAnnotation.decode(reader, reader.uint32())); + break; + case 24: + if (!(message.shotPresenceLabelAnnotations && message.shotPresenceLabelAnnotations.length)) + message.shotPresenceLabelAnnotations = []; + message.shotPresenceLabelAnnotations.push($root.google.cloud.videointelligence.v1.LabelAnnotation.decode(reader, reader.uint32())); + break; + case 4: + if (!(message.frameLabelAnnotations && message.frameLabelAnnotations.length)) + message.frameLabelAnnotations = []; + message.frameLabelAnnotations.push($root.google.cloud.videointelligence.v1.LabelAnnotation.decode(reader, reader.uint32())); + break; + case 5: if (!(message.faceAnnotations && message.faceAnnotations.length)) message.faceAnnotations = []; message.faceAnnotations.push($root.google.cloud.videointelligence.v1.FaceAnnotation.decode(reader, reader.uint32())); @@ -5218,6 +6318,11 @@ message.objectAnnotations = []; message.objectAnnotations.push($root.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.decode(reader, reader.uint32())); break; + case 19: + if (!(message.logoRecognitionAnnotations && message.logoRecognitionAnnotations.length)) + message.logoRecognitionAnnotations = []; + message.logoRecognitionAnnotations.push($root.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.decode(reader, reader.uint32())); + break; case 9: message.error = $root.google.rpc.Status.decode(reader, reader.uint32()); break; @@ -5359,6 +6464,15 @@ return "objectAnnotations." + error; } } + if (message.logoRecognitionAnnotations != null && message.hasOwnProperty("logoRecognitionAnnotations")) { + if (!Array.isArray(message.logoRecognitionAnnotations)) + return "logoRecognitionAnnotations: array expected"; + for (var i = 0; i < message.logoRecognitionAnnotations.length; ++i) { + var error = $root.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.verify(message.logoRecognitionAnnotations[i]); + if (error) + return "logoRecognitionAnnotations." 
+ error; + } + } if (message.error != null && message.hasOwnProperty("error")) { var error = $root.google.rpc.Status.verify(message.error); if (error) @@ -5491,6 +6605,16 @@ message.objectAnnotations[i] = $root.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.fromObject(object.objectAnnotations[i]); } } + if (object.logoRecognitionAnnotations) { + if (!Array.isArray(object.logoRecognitionAnnotations)) + throw TypeError(".google.cloud.videointelligence.v1.VideoAnnotationResults.logoRecognitionAnnotations: array expected"); + message.logoRecognitionAnnotations = []; + for (var i = 0; i < object.logoRecognitionAnnotations.length; ++i) { + if (typeof object.logoRecognitionAnnotations[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1.VideoAnnotationResults.logoRecognitionAnnotations: object expected"); + message.logoRecognitionAnnotations[i] = $root.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.fromObject(object.logoRecognitionAnnotations[i]); + } + } if (object.error != null) { if (typeof object.error !== "object") throw TypeError(".google.cloud.videointelligence.v1.VideoAnnotationResults.error: object expected"); @@ -5521,6 +6645,7 @@ object.speechTranscriptions = []; object.textAnnotations = []; object.objectAnnotations = []; + object.logoRecognitionAnnotations = []; object.segmentPresenceLabelAnnotations = []; object.shotPresenceLabelAnnotations = []; } @@ -5578,6 +6703,11 @@ for (var j = 0; j < message.objectAnnotations.length; ++j) object.objectAnnotations[j] = $root.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.toObject(message.objectAnnotations[j], options); } + if (message.logoRecognitionAnnotations && message.logoRecognitionAnnotations.length) { + object.logoRecognitionAnnotations = []; + for (var j = 0; j < message.logoRecognitionAnnotations.length; ++j) + object.logoRecognitionAnnotations[j] = $root.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.toObject(message.logoRecognitionAnnotations[j], options); + } if (message.segmentPresenceLabelAnnotations && message.segmentPresenceLabelAnnotations.length) { object.segmentPresenceLabelAnnotations = []; for (var j = 0; j < message.segmentPresenceLabelAnnotations.length; ++j) @@ -6041,6 +7171,7 @@ case 6: case 7: case 9: + case 12: break; } if (message.segment != null && message.hasOwnProperty("segment")) { @@ -6110,6 +7241,10 @@ case 9: message.feature = 9; break; + case "LOGO_RECOGNITION": + case 12: + message.feature = 12; + break; } if (object.segment != null) { if (typeof object.segment !== "object") @@ -9454,6 +10589,286 @@ return ObjectTrackingAnnotation; })(); + v1.LogoRecognitionAnnotation = (function() { + + /** + * Properties of a LogoRecognitionAnnotation. + * @memberof google.cloud.videointelligence.v1 + * @interface ILogoRecognitionAnnotation + * @property {google.cloud.videointelligence.v1.IEntity|null} [entity] LogoRecognitionAnnotation entity + * @property {Array.|null} [tracks] LogoRecognitionAnnotation tracks + * @property {Array.|null} [segments] LogoRecognitionAnnotation segments + */ + + /** + * Constructs a new LogoRecognitionAnnotation. + * @memberof google.cloud.videointelligence.v1 + * @classdesc Represents a LogoRecognitionAnnotation. 
+ * @implements ILogoRecognitionAnnotation + * @constructor + * @param {google.cloud.videointelligence.v1.ILogoRecognitionAnnotation=} [properties] Properties to set + */ + function LogoRecognitionAnnotation(properties) { + this.tracks = []; + this.segments = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * LogoRecognitionAnnotation entity. + * @member {google.cloud.videointelligence.v1.IEntity|null|undefined} entity + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @instance + */ + LogoRecognitionAnnotation.prototype.entity = null; + + /** + * LogoRecognitionAnnotation tracks. + * @member {Array.} tracks + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @instance + */ + LogoRecognitionAnnotation.prototype.tracks = $util.emptyArray; + + /** + * LogoRecognitionAnnotation segments. + * @member {Array.} segments + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @instance + */ + LogoRecognitionAnnotation.prototype.segments = $util.emptyArray; + + /** + * Creates a new LogoRecognitionAnnotation instance using the specified properties. + * @function create + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @static + * @param {google.cloud.videointelligence.v1.ILogoRecognitionAnnotation=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1.LogoRecognitionAnnotation} LogoRecognitionAnnotation instance + */ + LogoRecognitionAnnotation.create = function create(properties) { + return new LogoRecognitionAnnotation(properties); + }; + + /** + * Encodes the specified LogoRecognitionAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1.LogoRecognitionAnnotation.verify|verify} messages. + * @function encode + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @static + * @param {google.cloud.videointelligence.v1.ILogoRecognitionAnnotation} message LogoRecognitionAnnotation message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + LogoRecognitionAnnotation.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.entity != null && message.hasOwnProperty("entity")) + $root.google.cloud.videointelligence.v1.Entity.encode(message.entity, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.tracks != null && message.tracks.length) + for (var i = 0; i < message.tracks.length; ++i) + $root.google.cloud.videointelligence.v1.Track.encode(message.tracks[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.segments != null && message.segments.length) + for (var i = 0; i < message.segments.length; ++i) + $root.google.cloud.videointelligence.v1.VideoSegment.encode(message.segments[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified LogoRecognitionAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1.LogoRecognitionAnnotation.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @static + * @param {google.cloud.videointelligence.v1.ILogoRecognitionAnnotation} message LogoRecognitionAnnotation message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + LogoRecognitionAnnotation.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a LogoRecognitionAnnotation message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1.LogoRecognitionAnnotation} LogoRecognitionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + LogoRecognitionAnnotation.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1.LogoRecognitionAnnotation(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.entity = $root.google.cloud.videointelligence.v1.Entity.decode(reader, reader.uint32()); + break; + case 2: + if (!(message.tracks && message.tracks.length)) + message.tracks = []; + message.tracks.push($root.google.cloud.videointelligence.v1.Track.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.segments && message.segments.length)) + message.segments = []; + message.segments.push($root.google.cloud.videointelligence.v1.VideoSegment.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a LogoRecognitionAnnotation message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1.LogoRecognitionAnnotation} LogoRecognitionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + LogoRecognitionAnnotation.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a LogoRecognitionAnnotation message. + * @function verify + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + LogoRecognitionAnnotation.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.entity != null && message.hasOwnProperty("entity")) { + var error = $root.google.cloud.videointelligence.v1.Entity.verify(message.entity); + if (error) + return "entity." 
+ error; + } + if (message.tracks != null && message.hasOwnProperty("tracks")) { + if (!Array.isArray(message.tracks)) + return "tracks: array expected"; + for (var i = 0; i < message.tracks.length; ++i) { + var error = $root.google.cloud.videointelligence.v1.Track.verify(message.tracks[i]); + if (error) + return "tracks." + error; + } + } + if (message.segments != null && message.hasOwnProperty("segments")) { + if (!Array.isArray(message.segments)) + return "segments: array expected"; + for (var i = 0; i < message.segments.length; ++i) { + var error = $root.google.cloud.videointelligence.v1.VideoSegment.verify(message.segments[i]); + if (error) + return "segments." + error; + } + } + return null; + }; + + /** + * Creates a LogoRecognitionAnnotation message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1.LogoRecognitionAnnotation} LogoRecognitionAnnotation + */ + LogoRecognitionAnnotation.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1.LogoRecognitionAnnotation) + return object; + var message = new $root.google.cloud.videointelligence.v1.LogoRecognitionAnnotation(); + if (object.entity != null) { + if (typeof object.entity !== "object") + throw TypeError(".google.cloud.videointelligence.v1.LogoRecognitionAnnotation.entity: object expected"); + message.entity = $root.google.cloud.videointelligence.v1.Entity.fromObject(object.entity); + } + if (object.tracks) { + if (!Array.isArray(object.tracks)) + throw TypeError(".google.cloud.videointelligence.v1.LogoRecognitionAnnotation.tracks: array expected"); + message.tracks = []; + for (var i = 0; i < object.tracks.length; ++i) { + if (typeof object.tracks[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1.LogoRecognitionAnnotation.tracks: object expected"); + message.tracks[i] = $root.google.cloud.videointelligence.v1.Track.fromObject(object.tracks[i]); + } + } + if (object.segments) { + if (!Array.isArray(object.segments)) + throw TypeError(".google.cloud.videointelligence.v1.LogoRecognitionAnnotation.segments: array expected"); + message.segments = []; + for (var i = 0; i < object.segments.length; ++i) { + if (typeof object.segments[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1.LogoRecognitionAnnotation.segments: object expected"); + message.segments[i] = $root.google.cloud.videointelligence.v1.VideoSegment.fromObject(object.segments[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a LogoRecognitionAnnotation message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @static + * @param {google.cloud.videointelligence.v1.LogoRecognitionAnnotation} message LogoRecognitionAnnotation + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + LogoRecognitionAnnotation.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.tracks = []; + object.segments = []; + } + if (options.defaults) + object.entity = null; + if (message.entity != null && message.hasOwnProperty("entity")) + object.entity = $root.google.cloud.videointelligence.v1.Entity.toObject(message.entity, options); + if (message.tracks && message.tracks.length) { + object.tracks = []; + for (var j = 0; j < message.tracks.length; ++j) + object.tracks[j] = $root.google.cloud.videointelligence.v1.Track.toObject(message.tracks[j], options); + } + if (message.segments && message.segments.length) { + object.segments = []; + for (var j = 0; j < message.segments.length; ++j) + object.segments[j] = $root.google.cloud.videointelligence.v1.VideoSegment.toObject(message.segments[j], options); + } + return object; + }; + + /** + * Converts this LogoRecognitionAnnotation to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1.LogoRecognitionAnnotation + * @instance + * @returns {Object.} JSON object + */ + LogoRecognitionAnnotation.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + return LogoRecognitionAnnotation; + })(); + return v1; })(); diff --git a/packages/google-cloud-videointelligence/protos/protos.json b/packages/google-cloud-videointelligence/protos/protos.json index 6c026992bb4..95f007a3f4e 100644 --- a/packages/google-cloud-videointelligence/protos/protos.json +++ b/packages/google-cloud-videointelligence/protos/protos.json @@ -120,7 +120,8 @@ "FACE_DETECTION": 4, "SPEECH_TRANSCRIPTION": 6, "TEXT_DETECTION": 7, - "OBJECT_TRACKING": 9 + "OBJECT_TRACKING": 9, + "LOGO_RECOGNITION": 12 } }, "LabelDetectionMode": { @@ -369,6 +370,94 @@ } } }, + "TimestampedObject": { + "fields": { + "normalizedBoundingBox": { + "type": "NormalizedBoundingBox", + "id": 1 + }, + "timeOffset": { + "type": "google.protobuf.Duration", + "id": 2 + }, + "attributes": { + "rule": "repeated", + "type": "DetectedAttribute", + "id": 3, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + }, + "landmarks": { + "rule": "repeated", + "type": "DetectedLandmark", + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + } + }, + "Track": { + "fields": { + "segment": { + "type": "VideoSegment", + "id": 1 + }, + "timestampedObjects": { + "rule": "repeated", + "type": "TimestampedObject", + "id": 2 + }, + "attributes": { + "rule": "repeated", + "type": "DetectedAttribute", + "id": 3, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + }, + "confidence": { + "type": "float", + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + } + }, + "DetectedAttribute": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "confidence": { + "type": "float", + "id": 2 + }, + "value": { + "type": "string", + "id": 3 + } + } + }, + "DetectedLandmark": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "point": { + "type": "NormalizedVertex", + "id": 2 + }, + "confidence": { + "type": "float", + "id": 3 + } + } + }, 
"VideoAnnotationResults": { "fields": { "inputUri": { @@ -433,6 +522,11 @@ "type": "ObjectTrackingAnnotation", "id": 14 }, + "logoRecognitionAnnotations": { + "rule": "repeated", + "type": "LogoRecognitionAnnotation", + "id": 19 + }, "error": { "type": "google.rpc.Status", "id": 9 @@ -742,6 +836,24 @@ "id": 2 } } + }, + "LogoRecognitionAnnotation": { + "fields": { + "entity": { + "type": "Entity", + "id": 1 + }, + "tracks": { + "rule": "repeated", + "type": "Track", + "id": 2 + }, + "segments": { + "rule": "repeated", + "type": "VideoSegment", + "id": 3 + } + } } } }, diff --git a/packages/google-cloud-videointelligence/samples/README.md b/packages/google-cloud-videointelligence/samples/README.md index bc1dd1e0bdb..53c963d542a 100644 --- a/packages/google-cloud-videointelligence/samples/README.md +++ b/packages/google-cloud-videointelligence/samples/README.md @@ -24,6 +24,8 @@ * [Analyze_face_detection_gcs](#analyze_face_detection_gcs) * [Analyze_person_detection](#analyze_person_detection) * [Analyze_person_detection_gcs](#analyze_person_detection_gcs) + * [Detect_logo](#detect_logo) + * [Detect_logo_gcs](#detect_logo_gcs) * [Quickstart](#quickstart) ## Before you begin @@ -245,6 +247,40 @@ __Usage:__ +### Detect_logo + +View the [source code](https://github.com/googleapis/nodejs-video-intelligence/blob/master/samples/detect_logo.js). + +[![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-video-intelligence&page=editor&open_in_editor=samples/detect_logo.js,samples/README.md) + +__Usage:__ + + +`node samples/detect_logo.js` + + +----- + + + + +### Detect_logo_gcs + +View the [source code](https://github.com/googleapis/nodejs-video-intelligence/blob/master/samples/detect_logo_gcs.js). + +[![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-video-intelligence&page=editor&open_in_editor=samples/detect_logo_gcs.js,samples/README.md) + +__Usage:__ + + +`node samples/detect_logo_gcs.js` + + +----- + + + + ### Quickstart View the [source code](https://github.com/googleapis/nodejs-video-intelligence/blob/master/samples/quickstart.js). 
diff --git a/packages/google-cloud-videointelligence/synth.metadata b/packages/google-cloud-videointelligence/synth.metadata index 4adf647d80b..56385c828c4 100644 --- a/packages/google-cloud-videointelligence/synth.metadata +++ b/packages/google-cloud-videointelligence/synth.metadata @@ -1,20 +1,20 @@ { - "updateTime": "2020-03-09T11:45:53.267362Z", + "updateTime": "2020-03-18T11:55:32.761647Z", "sources": [ { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "af7dff701fabe029672168649c62356cf1bb43d0", - "internalRef": "299724050", - "log": "af7dff701fabe029672168649c62356cf1bb43d0\nAdd LogPlayerReports and LogImpressions to Playable Locations service\n\nPiperOrigin-RevId: 299724050\n\nb6927fca808f38df32a642c560082f5bf6538ced\nUpdate BigQuery Connection API v1beta1 proto: added credential to CloudSqlProperties.\n\nPiperOrigin-RevId: 299503150\n\n91e1fb5ef9829c0c7a64bfa5bde330e6ed594378\nchore: update protobuf (protoc) version to 3.11.2\n\nPiperOrigin-RevId: 299404145\n\n30e36b4bee6749c4799f4fc1a51cc8f058ba167d\nUpdate cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 299399890\n\nffbb493674099f265693872ae250711b2238090c\nfeat: cloudbuild/v1 add new fields and annotate OUTPUT_OUT fields.\n\nPiperOrigin-RevId: 299397780\n\nbc973a15818e00c19e121959832676e9b7607456\nbazel: Fix broken common dependency\n\nPiperOrigin-RevId: 299397431\n\n71094a343e3b962e744aa49eb9338219537474e4\nchore: bigtable/admin/v2 publish retry config\n\nPiperOrigin-RevId: 299391875\n\n8f488efd7bda33885cb674ddd023b3678c40bd82\nfeat: Migrate logging to GAPIC v2; release new features.\n\nIMPORTANT: This is a breaking change for client libraries\nin all languages.\n\nCommitter: @lukesneeringer, @jskeet\nPiperOrigin-RevId: 299370279\n\n007605bf9ad3a1fd775014ebefbf7f1e6b31ee71\nUpdate API for bigqueryreservation v1beta1.\n- Adds flex capacity commitment plan to CapacityCommitment.\n- Adds methods for getting and updating BiReservations.\n- Adds methods for updating/splitting/merging CapacityCommitments.\n\nPiperOrigin-RevId: 299368059\n\n" + "sha": "4ba9aa8a4a1413b88dca5a8fa931824ee9c284e6", + "internalRef": "299971671", + "log": "4ba9aa8a4a1413b88dca5a8fa931824ee9c284e6\nExpose logo recognition API proto for GA.\n\nPiperOrigin-RevId: 299971671\n\n1c9fc2c9e03dadf15f16b1c4f570955bdcebe00e\nAdding ruby_package option to accessapproval.proto for the Ruby client libraries generation.\n\nPiperOrigin-RevId: 299955924\n\n1cc6f0a7bfb147e6f2ede911d9b01e7a9923b719\nbuild(google/maps/routes): generate api clients\n\nPiperOrigin-RevId: 299955905\n\n29a47c965aac79e3fe8e3314482ca0b5967680f0\nIncrease timeout to 1hr for method `dropRange` in bigtable/admin/v2, which is\nsynced with the timeout setting in gapic_yaml.\n\nPiperOrigin-RevId: 299917154\n\n8f631c4c70a60a9c7da3749511ee4ad432b62898\nbuild(google/maps/roads/v1op): move go to monorepo pattern\n\nPiperOrigin-RevId: 299885195\n\nd66816518844ebbf63504c9e8dfc7133921dd2cd\nbuild(google/maps/roads/v1op): Add bazel build files to generate clients.\n\nPiperOrigin-RevId: 299851148\n\n" } }, { - "template": { - "name": "node_library", - "origin": "synthtool.gcp", - "version": "2020.2.4" + "git": { + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "38eaee81b0cc895b5db9dc0ce9b5dd19c21533c3" } } ],
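As a footnote to the regenerated `protos.js`/`protos.json` above, the new message also gets the usual protobuf.js static helpers (`verify`, `fromObject`, `toObject`, `toJSON`). The sketch below round-trips a plain object through them; it assumes the compiled protos are reachable via the package's `protos` export, so adjust the require path if your version surfaces them differently, and the entity values are made up.

```js
// Sketch: validate and round-trip a plain LogoRecognitionAnnotation object
// using the generated static helpers. The `protos` export path is an
// assumption about how the compiled protos are surfaced by the package.
const {protos} = require('@google-cloud/video-intelligence');
const {LogoRecognitionAnnotation} =
  protos.google.cloud.videointelligence.v1;

const plain = {
  entity: {entityId: '/m/example', description: 'Example logo'},
  tracks: [{confidence: 0.9, timestampedObjects: []}],
  segments: [{}],
};

// verify() returns null for a well-formed object, or a dotted path plus an
// error message (e.g. "tracks.object expected") otherwise.
const problem = LogoRecognitionAnnotation.verify(plain);
if (problem) throw new TypeError(problem);

// fromObject() builds a typed message; toObject() converts it back, here
// materializing defaults and empty repeated fields.
const message = LogoRecognitionAnnotation.fromObject(plain);
const back = LogoRecognitionAnnotation.toObject(message, {defaults: true});
console.log(back.entity.description, back.tracks.length, back.segments.length);
```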