diff --git a/.gitignore b/.gitignore
index 8a7c0f9d51d..65846f7f5b0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
**/*.log
**/node_modules
+coverage
.coverage
.nyc_output
docs/json
diff --git a/.jshintignore b/.jshintignore
index deb103b74e2..f9d34369f4e 100644
--- a/.jshintignore
+++ b/.jshintignore
@@ -2,5 +2,5 @@
**/system-test/*/**/*
**/test/*/**/*
**/node_modules/
-**/coverage
+**/coverage/
**/gapic*.js
diff --git a/.nycrc b/.nycrc
new file mode 100644
index 00000000000..af787f0f65e
--- /dev/null
+++ b/.nycrc
@@ -0,0 +1,26 @@
+{
+ "report-dir": "./.coverage",
+ "exclude": [
+ "src/*{/*,/**/*}.js",
+ "src/v*/*.js",
+ "test/**/*.js"
+ ],
+ "watermarks": {
+ "branches": [
+ 95,
+ 100
+ ],
+ "functions": [
+ 95,
+ 100
+ ],
+ "lines": [
+ 95,
+ 100
+ ],
+ "statements": [
+ 95,
+ 100
+ ]
+ }
+}
diff --git a/package.json b/package.json
index c021a006351..8cc6c020ed2 100644
--- a/package.json
+++ b/package.json
@@ -13,6 +13,7 @@
"extend": "^3.0.0",
"glob": "^5.0.9",
"globby": "^3.0.1",
+ "intelli-espower-loader": "^1.0.1",
"is": "^3.1.0",
"jscs": "^2.1.1",
"jshint": "^2.9.1",
@@ -25,9 +26,11 @@
"multiline": "^1.0.2",
"nyc": "^10.3.0",
"package-json": "^2.4.0",
+ "power-assert": "^1.4.2",
"propprop": "^0.3.1",
"semver": "^5.3.0",
"shelljs": "^0.7.3",
+ "sinon": "^2.2.0",
"string-format-obj": "^1.0.0",
"string-includes": "^2.0.0",
"through2": "^2.0.0"
@@ -41,37 +44,12 @@
"remove-ghpages": "node ./scripts/docs/remove.js",
"lint": "jshint scripts/ packages/ system-test/ test/ && jscs packages/ system-test/ test/",
"test": "npm run unit-test && npm run docs && npm run snippet-test",
- "unit-test": "mocha --timeout 5000 --bail packages/*/test/*.js",
+ "unit-test": "mocha --timeout 5000 --bail --require intelli-espower-loader packages/*/test/*.js",
"snippet-test": "mocha --timeout 5000 --bail test/docs.js",
- "system-test": "mocha packages/*/system-test/*.js --no-timeouts --bail",
- "cover": "nyc --reporter=lcov --reporter=html mocha --no-timeouts packages/*/test/*.js && nyc report",
+ "system-test": "mocha packages/*/system-test/*.js --require intelli-espower-loader --no-timeouts --bail",
+ "cover": "nyc --reporter=lcov --reporter=html mocha --require intelli-espower-loader --no-timeouts packages/*/test/*.js && nyc report",
"coveralls": "npm run cover && nyc report --reporter=text-lcov | coveralls"
},
- "nyc": {
- "report-dir": "./.coverage",
- "exclude": [
- "packages/*/src/*{/*,/**/*}.js",
- "packages/*/test/**/*.js"
- ],
- "watermarks": {
- "branches": [
- 95,
- 100
- ],
- "functions": [
- 95,
- 100
- ],
- "lines": [
- 95,
- 100
- ],
- "statements": [
- 95,
- 100
- ]
- }
- },
"license": "Apache-2.0",
"engines": {
"node": ">=4.0.0"
diff --git a/packages/speech/README.md b/packages/speech/README.md
index cf0fab6e737..ad7bfc45c83 100644
--- a/packages/speech/README.md
+++ b/packages/speech/README.md
@@ -1,105 +1,59 @@
-# @google-cloud/speech ([Alpha][versioning])
-> Cloud Speech Client Library for Node.js
+# Node.js Client for Google Cloud Speech API ([Beta](https://github.com/GoogleCloudPlatform/google-cloud-node#versioning))
-*Looking for more Google APIs than just Speech? You might want to check out [`google-cloud`][google-cloud].*
+Node.js client library for the [Google Cloud Speech API][Product Documentation].
+- [Client Library Documentation][]
+- [Product Documentation][]
-- [API Documentation][gcloud-speech-docs]
-- [Official Documentation][cloud-speech-docs]
+## Quick Start
+To use this library, you first need to go through the following steps:
+1. [Select or create a Cloud Platform project.](https://console.cloud.google.com/project)
+2. [Enable the Google Cloud Speech API.](https://console.cloud.google.com/apis/api/speech)
+3. [Set up authentication.](https://googlecloudplatform.github.io/google-cloud-node/#/docs/google-cloud/master/guides/authentication)
-```sh
-$ npm install --save @google-cloud/speech
+### Installation
```
-```js
-var speech = require('@google-cloud/speech')({
- projectId: 'grape-spaceship-123',
- keyFilename: '/path/to/keyfile.json'
-});
-
-// Detect the speech in an audio file.
-speech.recognize('./audio.raw', {
- encoding: 'LINEAR16',
- sampleRateHertz: 16000
-}, function(err, transcript) {
- // transcript = 'how old is the Brooklyn Bridge'
-});
-
-// Detect the speech in an audio file stream.
-fs.createReadStream('./audio.raw')
- .on('error', console.error)
- .pipe(speech.createRecognizeStream({
- config: {
- encoding: 'LINEAR16',
- sampleRateHertz: 16000
- },
- singleUtterance: false,
- interimResults: false
- }))
- .on('error', console.error)
- .on('data', function(data) {
- // data.results = "how old is the Brooklyn Bridge"
- });
-
-// Promises are also supported by omitting callbacks.
-speech.recognize('./audio.raw', {
- encoding: 'LINEAR16',
- sampleRateHertz: 16000
-}).then(function(data) {
- var transcript = data[0];
-});
-
-// It's also possible to integrate with third-party Promise libraries.
-var speech = require('@google-cloud/speech')({
- promise: require('bluebird')
-});
-```
-
-
-## Authentication
-
-It's incredibly easy to get authenticated and start using Google's APIs. You can set your credentials on a global basis as well as on a per-API basis. See each individual API section below to see how you can auth on a per-API-basis. This is useful if you want to use different accounts for different Cloud services.
-
-### On Google Cloud Platform
-
-If you are running this client on Google Cloud Platform, we handle authentication for you with no configuration. You just need to make sure that when you [set up the GCE instance][gce-how-to], you add the correct scopes for the APIs you want to access.
-
-``` js
-var speech = require('@google-cloud/speech')();
-// ...you're good to go!
+$ npm install --save @google-cloud/speech
```
-### Elsewhere
-
-If you are not running this client on Google Cloud Platform, you need a Google Developers service account. To create a service account:
-
-1. Visit the [Google Developers Console][dev-console].
-2. Create a new project or click on an existing project.
-3. Navigate to **APIs & auth** > **APIs section** and turn on the following APIs (you may need to enable billing in order to use these services):
- * Google Cloud Speech API
-4. Navigate to **APIs & auth** > **Credentials** and then:
- * If you want to use a new service account key, click on **Create credentials** and select **Service account key**. After the account key is created, you will be prompted to download the JSON key file that the library uses to authenticate your requests.
- * If you want to generate a new service account key for an existing service account, click on **Generate new JSON key** and download the JSON key file.
-
-``` js
-var projectId = process.env.GCLOUD_PROJECT; // E.g. 'grape-spaceship-123'
-
-var speech = require('@google-cloud/speech')({
- projectId: projectId,
-
- // The path to your key file:
- keyFilename: '/path/to/keyfile.json'
-
- // Or the contents of the key file:
- credentials: require('./path/to/keyfile.json')
-});
-
-// ...you're good to go!
+### Preview
+#### SpeechClient
+```js
+ var speech = require('@google-cloud/speech');
+
+ var client = speech({
+ // optional auth parameters.
+ });
+
+ var languageCode = 'en-US';
+ var sampleRateHertz = 44100;
+ var encoding = speech.v1.types.RecognitionConfig.AudioEncoding.FLAC;
+ var config = {
+ languageCode : languageCode,
+ sampleRateHertz : sampleRateHertz,
+ encoding : encoding
+ };
+ var uri = 'gs://gapic-toolkit/hello.flac';
+ var audio = {
+ uri : uri
+ };
+ var request = {
+ config: config,
+ audio: audio
+ };
+ client.recognize(request).then(function(responses) {
+ var response = responses[0];
+ // doThingsWith(response)
+ })
+ .catch(function(err) {
+ console.error(err);
+ });
```
+### Next Steps
+- Read the [Client Library Documentation][] for Google Cloud Speech API to see other available methods on the client.
+- Read the [Google Cloud Speech API Product documentation][Product Documentation] to learn more about the product and see How-to Guides.
+- View this [repository's main README](https://github.com/GoogleCloudPlatform/google-cloud-node/blob/master/README.md) to see the full list of Cloud APIs that we cover.
-[versioning]: https://github.com/GoogleCloudPlatform/google-cloud-node#versioning
-[google-cloud]: https://github.com/GoogleCloudPlatform/google-cloud-node/
-[gce-how-to]: https://cloud.google.com/compute/docs/authentication#using
-[dev-console]: https://console.developers.google.com/project
-[gcloud-speech-docs]: https://googlecloudplatform.github.io/google-cloud-node/#/docs/speech
-[cloud-speech-docs]: https://cloud.google.com/speech
+[Client Library Documentation]: https://googlecloudplatform.github.io/google-cloud-node/#/docs/speech
+[Product Documentation]: https://cloud.google.com/speech
\ No newline at end of file
diff --git a/packages/speech/package.json b/packages/speech/package.json
index a04fc9d710e..951cc20626f 100644
--- a/packages/speech/package.json
+++ b/packages/speech/package.json
@@ -1,6 +1,7 @@
{
+ "repository": "GoogleCloudPlatform/google-cloud-node",
"name": "@google-cloud/speech",
- "version": "0.9.4",
+ "version": "0.10.0",
"author": "Google Inc.",
"description": "Cloud Speech Client Library for Node.js",
"contributors": [
@@ -37,10 +38,8 @@
"files": [
"src",
"AUTHORS",
- "CONTRIBUTORS",
"COPYING"
],
- "repository": "googlecloudplatform/google-cloud-node",
"keywords": [
"google apis client",
"google api client",
@@ -51,36 +50,28 @@
"google cloud",
"cloud",
"google speech",
- "speech"
+ "speech",
+ "Google Cloud Speech API"
],
"dependencies": {
- "@google-cloud/common": "^0.13.0",
- "@google-cloud/common-grpc": "^0.3.0",
- "events-intercept": "^2.0.0",
- "extend": "^3.0.0",
- "google-gax": "^0.13.0",
"google-proto-files": "^0.12.0",
- "is": "^3.1.0",
- "propprop": "^0.3.1",
+ "google-gax": "^0.13.2",
+ "extend": "^3.0.0",
"pumpify": "^1.3.5",
- "request": "^2.74.0",
"stream-events": "^1.0.1",
- "string-format-obj": "^1.1.0",
- "through2": "^2.0.1"
+ "through2": "^2.0.3"
},
"devDependencies": {
- "@google-cloud/storage": "*",
- "async": "^2.0.1",
- "methmeth": "^1.1.0",
- "mocha": "^3.0.2",
- "proxyquire": "^1.7.10",
- "tmp": "^0.0.31",
- "uuid": "^3.0.1"
+ "intelli-espower-loader": "^1.0.1",
+ "mocha": "^3.2.0",
+ "nyc": "^10.3.0",
+ "power-assert": "^1.4.2",
+ "sinon": "^2.2.0"
},
"scripts": {
"publish-module": "node ../../scripts/publish.js speech",
- "test": "mocha test/*.js",
- "system-test": "mocha system-test/*.js --no-timeouts --bail"
+ "smoke-test": "mocha smoke-test/*.js --timeout 5000",
+ "test": "mocha test/*.js"
},
"license": "Apache-2.0",
"engines": {
diff --git a/packages/speech/smoke-test/speech_smoke_test.js b/packages/speech/smoke-test/speech_smoke_test.js
new file mode 100644
index 00000000000..7477fbaaa1b
--- /dev/null
+++ b/packages/speech/smoke-test/speech_smoke_test.js
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017, Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+'use strict';
+
+describe('SpeechSmokeTest', function() {
+
+ it('successfully makes a call to the service', function(done) {
+ var speech = require('../src');
+
+ var client = speech.v1({
+ // optional auth parameters.
+ });
+
+ var languageCode = 'en-US';
+ var sampleRateHertz = 44100;
+ var encoding = speech.v1.types.RecognitionConfig.AudioEncoding.FLAC;
+ var config = {
+ languageCode : languageCode,
+ sampleRateHertz : sampleRateHertz,
+ encoding : encoding
+ };
+ var uri = 'gs://gapic-toolkit/hello.flac';
+ var audio = {
+ uri : uri
+ };
+ var request = {
+ config: config,
+ audio: audio
+ };
+ client.recognize(request).then(function(responses) {
+ var response = responses[0];
+ console.log(response);
+ })
+ .then(done)
+ .catch(done);
+ });
+});
\ No newline at end of file
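Since `power-assert` is also a devDependency of the speech package, the `console.log(response)` step in the smoke test above could equally assert on the response shape. A hedged variant of that handler (hypothetical, not part of this diff):

```js
// Hypothetical assertion-based variant of the .then() handler above.
var assert = require('power-assert');

client.recognize(request).then(function(responses) {
  var response = responses[0];
  // Each result carries alternatives, each with a transcript string.
  assert(response.results.length > 0);
  assert(typeof response.results[0].alternatives[0].transcript === 'string');
})
.then(done)
.catch(done);
```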
diff --git a/packages/speech/src/helpers.js b/packages/speech/src/helpers.js
new file mode 100644
index 00000000000..de464ea501b
--- /dev/null
+++ b/packages/speech/src/helpers.js
@@ -0,0 +1,120 @@
+/*!
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*!
+ * @module speech/helpers
+ */
+
+'use strict';
+
+var pumpify = require('pumpify');
+var streamEvents = require('stream-events');
+var through = require('through2');
+
+/*!
+ * Return a dictionary-like object with helpers to augment the Speech
+ * GAPIC.
+ *
+ * @return {Object} - An object with keys and functions which are placed
+ * onto the pure GAPIC.
+ */
+module.exports = () => {
+ var methods = {};
+
+ /**
+ * Performs bidirectional streaming speech recognition: receive results while
+ * sending audio. This method is only available via the gRPC API (not REST).
+ *
+ * @param {Object} config
+ * The configuration for the stream. This is appropriately wrapped and
+ * sent as the first argument. It should be an object conforming to the
+ * [StreamingRecognitionConfig]{@link StreamingRecognitionConfig}
+ * structure.
+ * @param {Object=} options
+ * Optional parameters. You can override the default settings for this
+ * call, e.g., timeout, retries, pagination, etc. See
+ * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions}
+ * for the details.
+ * @returns {Stream}
+ * An object stream which is both readable and writable. It accepts
+ * [StreamingRecognizeRequest]{@link StreamingRecognizeRequest}-like
+ * objects for the write() method, and will emit objects representing
+ * [StreamingRecognizeResponse]{@link StreamingRecognizeResponse} on the
+ * 'data' event asynchronously.
+ *
+ * @example
+ *
+ * var client = require('@google-cloud/speech')();
+ *
+ * var stream = client.streamingRecognize({
+ * config: {
+ * encoding: 'LINEAR16',
+ * languageCode: 'en-US',
+ * sampleRateHertz: 44100,
+ * },
+ * }).on('data', function(response) {
+ * // doThingsWith(response);
+ * });
+ * var request = {};
+ * // Write request objects.
+ * stream.write(request);
+ */
+ methods.streamingRecognize = function(config, options) {
+ if (options === undefined) {
+ options = {};
+ }
+
+ var requestStream = this._streamingRecognize(options);
+
+ // The user-facing stream; the pipeline below formats written audio into requests.
+ var recognizeStream = streamEvents(pumpify.obj());
+
+ recognizeStream.once('writing', function() {
+ requestStream.on('error', function(err) {
+ recognizeStream.destroy(err);
+ });
+
+ requestStream.on('response', function(response) {
+ recognizeStream.emit('response', response);
+ });
+
+ // Write the initial configuration to the stream.
+ requestStream.write({
+ streamingConfig: config
+ });
+
+ this.setPipeline([
+ // Format the user's input.
+ through.obj(function(obj, _, next) {
+ next(null, {
+ audioContent: obj
+ });
+ }),
+
+ requestStream,
+
+ // Pass through the results.
+ through.obj(function(obj, _, next) {
+ next(null, obj);
+ })
+ ]);
+ });
+
+ return recognizeStream;
+ };
+
+ return methods;
+};
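The helper above wraps the generated `_streamingRecognize` call in a pumpify pipeline: the first write sends the wrapped `streamingConfig`, subsequent writes are wrapped as `audioContent`, and responses pass straight through. A hedged usage sketch combining the `@example` above with the file-piping pattern from the old `createRecognizeStream` docs removed later in this diff (`./audio.raw` is a placeholder path):

```js
// Sketch only; assumes the client construction shown in the README preview.
var fs = require('fs');
var speech = require('@google-cloud/speech');
var client = speech();

fs.createReadStream('./audio.raw')
  .on('error', console.error)
  .pipe(client.streamingRecognize({
    config: {
      encoding: 'LINEAR16',
      languageCode: 'en-US',
      sampleRateHertz: 16000
    },
    interimResults: false
  }))
  .on('error', console.error)
  .on('data', function(response) {
    // Each response is a StreamingRecognizeResponse object.
    // doThingsWith(response);
  });
```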
diff --git a/packages/speech/src/index.js b/packages/speech/src/index.js
index e590af6f7f6..d55e6a74498 100644
--- a/packages/speech/src/index.js
+++ b/packages/speech/src/index.js
@@ -1,11 +1,11 @@
-/*!
- * Copyright 2016 Google Inc. All Rights Reserved.
+/*
+ * Copyright 2017, Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,834 +16,57 @@
/*!
* @module speech
+ * @name Speech
*/
'use strict';
-var common = require('@google-cloud/common');
-var commonGrpc = require('@google-cloud/common-grpc');
-var eventsIntercept = require('events-intercept');
var extend = require('extend');
-var format = require('string-format-obj');
-var fs = require('fs');
-var googleProtoFiles = require('google-proto-files');
-var is = require('is');
-var path = require('path');
-var prop = require('propprop');
-var pumpify = require('pumpify');
-var request = require('request');
-var streamEvents = require('stream-events');
-var through = require('through2');
-var util = require('util');
-var v1 = require('./v1');
+var gapic = {
+ v1: require('./v1')
+};
+var gaxGrpc = require('google-gax').grpc();
+var helpers = require('./helpers');
+
+const VERSION = require('../package.json').version;
/**
- * The [Cloud Speech API](https://cloud.google.com/speech/docs) enables easy
- * integration of Google speech recognition technologies into developer
- * applications. Send audio and receive a text transcription from the Cloud
- * Speech API service.
+ * Create a SpeechClient with additional helpers for common
+ * tasks.
+ *
+ * Service that implements the Google Cloud Speech API.
*
* @constructor
* @alias module:speech
+ * @mixes module:speech/helpers
*
- * @classdesc
- * To learn more about the Speech API, see the
- * [Getting Started guide](https://cloud.google.com/speech/docs/getting-started).
- *
- * @resource [Getting Started]{@link https://cloud.google.com/speech/docs/getting-started}
- * @resource [Speech Best Practices]{@link https://cloud.google.com/speech/docs/best-practices}
- *
- * @param {object} options - [Configuration object](#/docs).
+ * @param {object=} options - [Configuration object](#/docs).
+ * @param {number=} options.port - The port on which to connect to
+ * the remote host.
+ * @param {string=} options.servicePath - The domain name of the
+ * API remote host.
*/
-function Speech(options) {
- if (!(this instanceof Speech)) {
- options = common.util.normalizeArguments(this, options);
- return new Speech(options);
- }
-
+function speechV1(options) {
+ // Define the header options.
options = extend({}, options, {
libName: 'gccl',
- libVersion: require('../package.json').version
+ libVersion: VERSION
});
- this.api = {
- Speech: v1(options).speechClient(options)
- };
-
- var config = {
- baseUrl: 'speech.googleapis.com',
- projectIdRequired: false,
- service: 'speech',
- protoServices: {
- Operations: {
- path: googleProtoFiles('longrunning', 'operations.proto'),
- service: 'longrunning'
- }
- },
- scopes: [
- 'https://www.googleapis.com/auth/cloud-platform'
- ],
- packageJson: require('../package.json')
- };
-
- commonGrpc.Service.call(this, config, options);
+ // Create the speech client with the provided options.
+ var client = gapic.v1(options).speechClient(options);
+ Object.assign(client.constructor.prototype, helpers());
+ return client;
}
-util.inherits(Speech, commonGrpc.Service);
-
-/**
- * The event types that the Speech API will return while processing a
- * {module:speech#createRecognizeStream} request. You can track the progress of
- * audio recognition by comparing the `data.eventType` property with these
- * values.
- *
- * - `Speech.eventTypes.ENDPOINTER_EVENT_UNSPECIFIED`: No event specified.
- * - `Speech.eventTypes.END_OF_SINGLE_UTTERANCE`: This event is only sent when
- * `config.singleUtterance` passed to {module:speech#createRecognizeStream}
- * is `true`. It indicates that the server has detected the end of the
- * user's speech utterance and expects no additional speech. Therefore, the
- * server will not process additional audio. The client should stop sending
- * additional audio data.
- *
- * @type {object}
- */
-Speech.eventTypes =
-Speech.prototype.eventTypes = {
- END_OF_SINGLE_UTTERANCE: 'END_OF_SINGLE_UTTERANCE',
- ENDPOINTER_EVENT_UNSPECIFIED: 'ENDPOINTER_EVENT_UNSPECIFIED'
-};
-
-/**
- * Guess the audio encoding from the file's extension.
- *
- * @resource [AudioEncoding API Documentation]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.AudioEncoding}
- * @private
- *
- * @throws {Error} If an encoding type could not be determined from the file's
- * extension.
- *
- * @param {string} filename - The name of the file.
- * @returns {string} The audio encoding.
- */
-Speech.detectEncoding_ = function(filename) {
- if (!is.string(filename)) {
- return;
- }
-
- switch (path.extname(filename).toLowerCase()) {
- case '.raw': {
- return 'LINEAR16';
- }
- case '.amr': {
- return 'AMR';
- }
- case '.awb': {
- return 'AMR_WB';
- }
- case '.flac': {
- return 'FLAC';
- }
- case '.au':
- case '.wav': {
- return 'MULAW';
- }
- default: {
- throw new Error('Encoding could not be determined for file: ' + filename);
- }
- }
-};
-
-/**
- * Determine the type of file the user is asking to be processed. If a
- * {module:storage/file}, convert to its "gs://{bucket}/{file}" URL. If a remote
- * URL, read the contents. If a file path, load the file.
- *
- * @private
- */
-Speech.findFile_ = function(file, callback) {
- if (global.GCLOUD_SANDBOX_ENV) {
- callback(null, {
- content: new Buffer('')
- });
- return;
- }
-
- if (common.util.isCustomType(file, 'storage/file')) {
- // File is an instance of module:storage/file.
- callback(null, {
- uri: format('gs://{bucketName}/{fileName}', {
- bucketName: file.bucket.name,
- fileName: file.name
- })
- });
- return;
- }
-
- if (is.string(file) && file.indexOf('gs://') === 0) {
- // File is a Cloud Storage URI.
- callback(null, {
- uri: file
- });
- return;
- }
-
- if (/^http/.test(file)) {
- // File is a URL.
- request({
- uri: file,
- encoding: null
- }, function(err, resp, body) {
- if (err) {
- callback(err);
- return;
- }
-
- callback(null, {
- content: body
- });
- });
- return;
- }
-
- if (Buffer.isBuffer(file)) {
- callback(null, {
- content: file
- });
- return;
- }
-
- if (is.object(file)) {
- // This might be a RecognitionAudio object.
- if (!file.content && !file.uri) {
- var errorMsg = 'RecognitionAudio requires a "content" or "uri" property.';
- callback(new Error(errorMsg));
- } else {
- callback(null, file);
- }
- return;
- }
-
- // File exists on disk.
- fs.readFile(file, function(err, contents) {
- if (err) {
- callback(err);
- return;
- }
-
- callback(null, {
- content: contents
- });
- });
-};
-
-/**
- * Simplify the transcription results from the API.
- *
- * @resource [SpeechRecognitionResult API Documentation]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.SpeechRecognitionResult}
- * @resource [StreamingRecognitionResult API Documentation]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.StreamingRecognitionResult}
- *
- * @private
- *
- * @param {object} resultSets - A `SpeechRecognitionResult` or
- * `StreamingRecognitionResult` object.
- * @param {boolean} verbose - Whether to use verbose mode.
- * @return {object} - The simplified results.
- *
- * @example
- * var resultSets = [
- * {
- * alternatives: [
- * {
- * transcript: 'Result 1a',
- * confidence: 0.70
- * },
- * {
- * transcript: 'Result 1b',
- * confidence: 0.60
- * },
- * ...
- * ]
- * },
- * {
- * alternatives: [
- * {
- * transcript: 'Result 2a',
- * confidence: 0.90
- * },
- * {
- * transcript: 'Result 2b',
- * confidence: 0.80
- * },
- * ...
- * ]
- * }
- * ];
- *
- * //-
- * // Default output.
- * //-
- * Speech.formatResults_(resultSets);
- * // 'Result 1a Result 2a'
- *
- * //-
- * // Verbose output.
- * //-
- * Speech.formatResults_(resultSets, true);
- * // [
- * // {
- * // transcript: 'Result 1a',
- * // confidence: 70,
- * // alternatives: [
- * // {
- * // transcript: 'Result 1b',
- * // confidence: 60
- * // },
- * // ...
- * // ]
- * // },
- * // {
- * // transcript: 'Result 2a',
- * // confidence: 90,
- * // alternatives: [
- * // {
- * // transcript: 'Result 2b',
- * // confidence: 80
- * // },
- * // ...
- * // ]
- * // }
- * // ]
- */
-Speech.formatResults_ = function(resultSets, verboseMode) {
- function multiplyScores(result) {
- if (is.defined(result.confidence)) {
- result.confidence *= 100;
- }
-
- if (is.defined(result.stability)) {
- result.stability *= 100;
- }
-
- return result;
- }
-
- var verboseResultSets = resultSets
- .map(function(resultSet) {
- resultSet = extend(true, {}, resultSet);
-
- var mostProbableResult = multiplyScores(resultSet.alternatives.shift());
-
- resultSet.transcript = mostProbableResult.transcript;
-
- if (is.defined(mostProbableResult.confidence)) {
- resultSet.confidence = mostProbableResult.confidence;
- }
-
- if (is.defined(mostProbableResult.stability)) {
- resultSet.stability = mostProbableResult.stability;
- }
-
- resultSet.alternatives = resultSet.alternatives.map(multiplyScores);
-
- return resultSet;
- });
-
- if (!verboseMode) {
- return verboseResultSets.map(prop('transcript')).join(' ');
- }
-
- return verboseResultSets;
-};
-
-/**
- * Perform bidirectional streaming speech-recognition: receive results while
- * sending audio.
- *
- * Each emitted `data` event is a
- * [`StreamingRecognizeResponse`](https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.StreamingRecognizeResponse)
- * object, containing these properties:
- *
- * - **`eventType`** See {module:speech#eventTypes}.
- * - **`results`** By default, a combined string of transcripts. When
- * `config.verbose` is enabled, this is an object including a `transcript`
- * property, a `confidence` score from `0` - `100`, and an `alternatives`
- * array consisting of other transcription possibilities.
- *
- * Cloud Speech sets the limits for the audio duration. For more
- * information, see
- * [Content Limits]{@link https://cloud.google.com/speech/limits#content}.
- *
- * @resource [StreamingRecognize API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.Speech.StreamingRecognize}
- * @resource [StreamingRecognizeRequest API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.StreamingRecognizeRequest}
- * @resource [Content Limits]{@link https://cloud.google.com/speech/limits#content}
- *
- * @param {object} config - A `StreamingRecognitionConfig` object. See
- * [`StreamingRecognitionConfig`](https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.StreamingRecognitionConfig).
- * @param {string} config.languageCode - The language of the supplied audio as
- * [BCP-47 language tag](http://bit.ly/1ZHeENX). Example: 'en-US'.
- * @param {number=} config.timeout - In seconds, the amount of time before the
- * underlying API request times out. The default value, `190`, is sufficient
- * for audio input of 60 seconds or less. If your input is longer, consider
- * using a higher timeout value.
- * @param {boolean=} config.verbose - Enable verbose mode for a more detailed
- * response. See the examples below. Default: `false`.
- *
- * @example
- * var fs = require('fs');
- *
- * //-
- * // See
- * // `StreamingRecognizeRequest` for all of the available configuration
- * // options.
- * //-
- * var request = {
- * config: {
- * encoding: 'LINEAR16',
- * languageCode: 'en-US',
- * sampleRateHertz: 16000
- * },
- * singleUtterance: false,
- * interimResults: false
- * };
- *
- * fs.createReadStream('./bridge.raw')
- * .on('error', console.error)
- * .pipe(speech.createRecognizeStream(request))
- * .on('error', console.error)
- * .on('data', function(data) {
- * // data.results = "how old is the Brooklyn Bridge"
- * });
- *
- * //-
- * // Enable verbose mode for more detailed results.
- * //-
- * var request = {
- * config: {
- * encoding: 'LINEAR16',
- * languageCode: 'en-US',
- * sampleRateHertz: 16000
- * },
- * singleUtterance: false,
- * interimResults: false,
- * verbose: true
- * };
- *
- * fs.createReadStream('./system-test/data/bridge.raw')
- * .on('error', console.error)
- * .pipe(speech.createRecognizeStream(request))
- * .on('error', console.error)
- * .on('data', function(data) {
- * // data.results = "how old is the Brooklyn Bridge"
- * });
- */
-Speech.prototype.createRecognizeStream = function(config) {
- var self = this;
-
- if (!config) {
- throw new Error('A recognize request requires a configuration object.');
- }
-
- config = extend(true, {
- config: {}
- }, config);
-
- // As of Speech v1, a language code is required; throw an exception if we did
- // not receive one.
- if (config.languageCode) {
- config.config.languageCode = config.languageCode;
- delete config.languageCode;
- }
-
- if (!config.config.languageCode) {
- throw new Error('A `languageCode` is required in the config object.');
- }
-
- var verboseMode = config.verbose === true;
- delete config.verbose;
-
- var gaxOptions = {};
-
- if (is.number(config.timeout)) {
- gaxOptions.timeout = config.timeout * 1000;
- delete config.timeout;
- }
+var v1Protos = {};
- var recognizeStream = streamEvents(pumpify.obj());
-
- recognizeStream.once('writing', function() {
- var requestStream = self.api.Speech.streamingRecognize(gaxOptions);
-
- requestStream.on('error', function(err) {
- recognizeStream.destroy(err);
- });
-
- requestStream.on('response', function(response) {
- recognizeStream.emit('response', response);
- });
-
- requestStream.write({
- streamingConfig: config
- });
-
- this.setPipeline([
- // Format the user's input.
- through.obj(function(obj, _, next) {
- next(null, {
- audioContent: obj
- });
- }),
-
- requestStream,
-
- // Format the results.
- through.obj(function(obj, _, next) {
- obj.results = Speech.formatResults_(obj.results, verboseMode);
- next(null, obj);
- })
- ]);
- });
-
- return recognizeStream;
-};
-
-/*! Developer Documentation
- *
- * @returns {module:commonGrpc/Operation}
- */
-/**
- * Get a reference to an existing operation.
- *
- * @throws {Error} If a name is not provided.
- *
- * @param {string} name - The name of the operation.
- *
- * @example
- * var operation = speech.operation('68850831366825');
- */
-Speech.prototype.operation = function(name) {
- if (!name) {
- throw new Error('A name must be specified for an operation.');
- }
-
- return new commonGrpc.Operation(this, name);
-};
-
-/**
- * Perform synchronous speech recognition and receive results after all audio
- * has been sent and processed. This is ideal for files 1 MB or below. For
- * larger files, you will need to use {module:speech#startRecognition} or
- * {module:speech#createRecognizeStream}.
- *
- * @resource [Recognize API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.Speech.Recognize}
- * @resource [RecognizeRequest API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.RecognizeRequest}
- *
- * @param {string|object|module:storage/file} file - The source file to run the
- * detection on. It can be either a local file path, a remote file URL, a
- * Cloud Storage URI, a Cloud Storage File object, or a
- * [`RecognitionAudio`](https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.RecognitionAudio)
- * object.
- * @param {object} config - A `RecognitionConfig` object. See
- * [`RecognitionConfig`](https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.RecognitionConfig).
- * @param {string} config.languageCode - The language of the supplied audio as
- * [BCP-47 language tag](http://bit.ly/1ZHeENX). Example: 'en-US'.
- * @param {boolean=} config.verbose - Enable verbose mode for a more detailed
- * response. See the examples below. Default: `false`.
- * @param {function} callback - The callback function.
- * @param {?error} callback.err - An error returned while making this request.
- * @param {string|object[]} callback.results - By default, this will be a string
- * comprised of all of the transcriptions recognized from the audio. If
- * `config.verbose` is enabled, this is an object including a `transcript`
- * property, a `confidence` score from `0` - `100`, and an `alternatives`
- * array consisting of other transcription possibilities. See the examples
- * below for more.
- * @param {object} callback.apiResponse - Raw API response. See
- * [`RecognizeResponse`](https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.RecognizeResponse).
- *
- * @example
- * var config = {
- * encoding: 'LINEAR16',
- * languageCode: 'en-US',
- * sampleRateHertz: 16000
- * };
- *
- * function callback(err, transcript, apiResponse) {
- * if (err) {
- * // Error handling omitted.
- * }
- *
- * // transcript = "how old is the Brooklyn Bridge"
- * }
- *
- * //-
- * // Run speech detection over a local file.
- * //-
- * speech.recognize('./bridge.raw', config, callback);
- *
- * //-
- * // Run speech recognition over a file in Cloud Storage.
- * //-
- * speech.recognize('gs://your-bucket-name/bridge.raw', config, callback);
- *
- * //-
- * // Run speech recognition over raw file contents.
- * //-
- * speech.recognize({
- * content: fs.readFileSync('./bridge.raw')
- * }, config, callback);
- *
- * //-
- * // Run speech recognition over a remote file.
- * //
- * // Note: This is not an officially supported feature of the Speech API.
- * // This library will make a request to the URL given and send the file
- * // contents to the upstream API.
- * //-
- * speech.recognize('https://example.com/files/bridge.raw', config, callback);
- *
- * //-
- * // Enable verbose mode for more detailed results.
- * //-
- * var config = {
- * encoding: 'LINEAR16',
- * languageCode: 'en-US',
- * sampleRateHertz: 16000,
- * verbose: true
- * };
- *
- * speech.recognize('./bridge.raw', config, function(err, results) {
- * if (err) {
- * // Error handling omitted.
- * }
- *
- * // results = [
- * // {
- * // transcript: "how old is the Brooklyn Bridge",
- * // confidence: 88.15,
- * // alternatives: [
- * // {
- * // transcript: "how old is the Brooklyn brim",
- * // confidence: 22.39
- * // }
- * // ]
- * // }
- * // ]
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * speech.recognize('./bridge.raw', config).then(function(data) {
- * var results = data[0];
- * var apiResponse = data[1];
- * });
- */
-Speech.prototype.recognize = function(file, config, callback) {
- var self = this;
-
- if (!is.object(config)) {
- throw new Error('A recognize request requires a configuration object.');
- }
-
- config = extend(true, {}, config);
-
- // As of Speech v1, a language code is required; throw an exception if we
- // did not receive one.
- if (is.undefined(config.languageCode)) {
- throw new Error('A `languageCode` is required in the config object.');
- }
-
- if (!config.encoding) {
- config.encoding = Speech.detectEncoding_(file);
- }
-
- var verboseMode = config.verbose === true;
- delete config.verbose;
-
- Speech.findFile_(file, function(err, foundFile) {
- if (err) {
- callback(err);
- return;
- }
-
- self.api.Speech.recognize({
- config: config,
- audio: foundFile
- }, function(err, resp) {
- if (err) {
- callback(err, null, resp);
- return;
- }
-
- var results = Speech.formatResults_(resp.results, verboseMode);
-
- callback(null, results, resp);
- });
- });
-};
-
-/**
- * Perform asynchronous speech recognition.
- *
- * This method sends audio to the Speech API, which immediately responds with an
- * Operation object. Register event handlers for the "error" and "complete"
- * events to see how the operation finishes. Follow along with the examples
- * below.
- *
- * @resource [LongRunningRecognize API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.Speech.LongRunningRecognize}
- * @resource [LongRunningRecognizeRequest API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.LongRunningRecognizeRequest}
- * @resource [LongRunningRecognizeResponse API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.LongRunningRecognizeResponse}
- *
- * @param {string|object|module:storage/file} file - The source file to run the
- * detection on. It can be either a local file path, a remote file URL, a
- * Cloud Storage URI, a Cloud Storage File object, or a
- * [`RecognitionAudio`](https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.RecognitionAudio)
- * object.
- * @param {object} config - A `RecognitionConfig` object. See
- * [`RecognitionConfig`](https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.RecognitionConfig).
- * @param {boolean=} config.verbose - Enable verbose mode for a more detailed
- * response. See the examples below. Default: `false`.
- * @param {string} config.languageCode - The language of the supplied audio as
- * [BCP-47 language tag](http://bit.ly/1ZHeENX). Example: 'en-US'.
- * @param {function} callback - The callback function.
- * @param {?error} callback.err - An error returned while making this request.
- * @param {module:speech/operation} callback.operation - An operation object
- * that can be used to check the status of the request.
- * @param {object} callback.apiResponse - Raw API response.
- *
- * @example
- * var config = {
- * encoding: 'LINEAR16',
- * languageCode: 'en-US',
- * sampleRateHertz: 16000
- * };
- *
- * function callback(err, operation, apiResponse) {
- * if (err) {
- * // Error handling omitted.
- * }
- *
- * operation
- * .on('error', function(err) {})
- * .on('complete', function(transcript) {
- * // transcript = "how old is the Brooklyn Bridge"
- * });
- * }
- *
- * //-
- * // Run speech detection over a local file.
- * //-
- * speech.startRecognition('./bridge.raw', config, callback);
- *
- * //-
- * // Run speech detection over a file in Cloud Storage.
- * //-
- * var file = 'gs://your-bucket-name/bridge.raw';
- * speech.startRecognition(file, config, callback);
- *
- * //-
- * // Run speech detection over raw file contents.
- * //-
- * speech.startRecognition({
- * content: fs.readFileSync('./bridge.raw')
- * }, config, callback);
- *
- * //-
- * // Run speech detection over a remote file.
- * //
- * // Note: This is not an officially supported feature of the Speech API.
- * // This library will make a request to the URL given and send the file
- * // contents to the upstream API.
- * //-
- * var file = 'https://example.com/files/bridge.raw';
- *
- * speech.startRecognition(file, config, callback);
- *
- * //-
- * // Enable verbose mode for more detailed results.
- * //-
- * var config = {
- * encoding: 'LINEAR16',
- * languageCode: 'en-US',
- * sampleRateHertz: 16000,
- * verbose: true
- * };
- *
- * speech.startRecognition('./bridge.raw', config, function(err, operation) {
- * if (err) {
- * // Error handling omitted.
- * }
- *
- * operation
- * .on('error', function(err) {})
- * .on('complete', function(results) {
- * // results = [
- * // {
- * // transcript: "how old is the Brooklyn Bridge",
- * // confidence: 88.15
- * // }
- * // ]
- * });
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * speech.startRecognition('./bridge.raw', config).then(function(data) {
- * var operation = data[0];
- * var apiResponse = data[1];
- * });
- */
-Speech.prototype.startRecognition = function(file, config, callback) {
- var self = this;
-
- config = extend(true, {}, config);
-
- // As of Speech v1, a language code is required; throw an exception if we
- // did not receive one.
- if (is.undefined(config.languageCode)) {
- throw new Error('A `languageCode` is required in the config object.');
- }
-
- if (!config.encoding) {
- config.encoding = Speech.detectEncoding_(file);
- }
-
- var verboseMode = config.verbose === true;
- delete config.verbose;
-
- Speech.findFile_(file, function(err, foundFile) {
- if (err) {
- callback(err);
- return;
- }
-
- self.api.Speech.longRunningRecognize({
- config: config,
- audio: foundFile
- }, function(err, operation, resp) {
- if (err) {
- callback(err, null, resp);
- return;
- }
-
- eventsIntercept.patch(operation);
- operation.intercept('complete', function(result, meta, resp, callback) {
- callback(null, Speech.formatResults_(result.results, verboseMode));
- });
-
- callback(null, operation, resp);
- });
- });
-};
-
-/*! Developer Documentation
- *
- * All async methods (except for streams) will return a Promise in the event
- * that a callback is omitted.
- */
-common.util.promisifyAll(Speech, {
- exclude: ['operation']
-});
+extend(v1Protos, gaxGrpc.load([{
+ root: require('google-proto-files')('..'),
+ file: 'google/cloud/speech/v1/cloud_speech.proto'
+}]).google.cloud.speech.v1);
-module.exports = Speech;
-module.exports.v1 = v1;
+module.exports = speechV1;
+module.exports.types = v1Protos;
+module.exports.v1 = speechV1;
+module.exports.v1.types = v1Protos;
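The rewritten exports make the package callable directly while also exposing `v1` and the proto types loaded through google-gax, which is why the README preview and the smoke test earlier in this diff construct clients in slightly different but equivalent ways. A short sketch of the shapes these exports allow (auth parameters elided):

```js
// Construction patterns supported by the new exports (both appear elsewhere in this diff).
var speech = require('@google-cloud/speech');

var client = speech({/* optional auth parameters */});      // README preview style
var sameKind = speech.v1({/* optional auth parameters */}); // smoke-test style

// The loaded protos are reachable from either export:
var encoding = speech.v1.types.RecognitionConfig.AudioEncoding.FLAC;
var sameEncoding = speech.types.RecognitionConfig.AudioEncoding.FLAC;

// helpers.js methods are mixed onto the client prototype, so
// client.streamingRecognize(...) sits alongside the generated recognize().
```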
diff --git a/packages/speech/src/v1/doc/doc_cloud_speech.js b/packages/speech/src/v1/doc/doc_cloud_speech.js
new file mode 100644
index 00000000000..0438652ccb2
--- /dev/null
+++ b/packages/speech/src/v1/doc/doc_cloud_speech.js
@@ -0,0 +1,534 @@
+/*
+ * Copyright 2017, Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Note: this file is purely for documentation. Any contents are not expected
+ * to be loaded as the JS file.
+ */
+
+/**
+ * The top-level message sent by the client for the `Recognize` method.
+ *
+ * @property {Object} config
+ * *Required* Provides information to the recognizer that specifies how to
+ * process the request.
+ *
+ * This object should have the same structure as [RecognitionConfig]{@link RecognitionConfig}
+ *
+ * @property {Object} audio
+ * *Required* The audio data to be recognized.
+ *
+ * This object should have the same structure as [RecognitionAudio]{@link RecognitionAudio}
+ *
+ * @class
+ * @see [google.cloud.speech.v1.RecognizeRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var RecognizeRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The top-level message sent by the client for the `LongRunningRecognize`
+ * method.
+ *
+ * @property {Object} config
+ * *Required* Provides information to the recognizer that specifies how to
+ * process the request.
+ *
+ * This object should have the same structure as [RecognitionConfig]{@link RecognitionConfig}
+ *
+ * @property {Object} audio
+ * *Required* The audio data to be recognized.
+ *
+ * This object should have the same structure as [RecognitionAudio]{@link RecognitionAudio}
+ *
+ * @class
+ * @see [google.cloud.speech.v1.LongRunningRecognizeRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var LongRunningRecognizeRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The top-level message sent by the client for the `StreamingRecognize` method.
+ * Multiple `StreamingRecognizeRequest` messages are sent. The first message
+ * must contain a `streaming_config` message and must not contain `audio` data.
+ * All subsequent messages must contain `audio` data and must not contain a
+ * `streaming_config` message.
+ *
+ * @property {Object} streamingConfig
+ * Provides information to the recognizer that specifies how to process the
+ * request. The first `StreamingRecognizeRequest` message must contain a
+ * `streaming_config` message.
+ *
+ * This object should have the same structure as [StreamingRecognitionConfig]{@link StreamingRecognitionConfig}
+ *
+ * @property {string} audioContent
+ * The audio data to be recognized. Sequential chunks of audio data are sent
+ * in sequential `StreamingRecognizeRequest` messages. The first
+ * `StreamingRecognizeRequest` message must not contain `audio_content` data
+ * and all subsequent `StreamingRecognizeRequest` messages must contain
+ * `audio_content` data. The audio bytes must be encoded as specified in
+ * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+ * pure binary representation (not base64). See
+ * [audio limits](https://cloud.google.com/speech/limits#content).
+ *
+ * @class
+ * @see [google.cloud.speech.v1.StreamingRecognizeRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var StreamingRecognizeRequest = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Provides information to the recognizer that specifies how to process the
+ * request.
+ *
+ * @property {Object} config
+ * *Required* Provides information to the recognizer that specifies how to
+ * process the request.
+ *
+ * This object should have the same structure as [RecognitionConfig]{@link RecognitionConfig}
+ *
+ * @property {boolean} singleUtterance
+ * *Optional* If `false` or omitted, the recognizer will perform continuous
+ * recognition (continuing to wait for and process audio even if the user
+ * pauses speaking) until the client closes the input stream (gRPC API) or
+ * until the maximum time limit has been reached. May return multiple
+ * `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
+ *
+ * If `true`, the recognizer will detect a single spoken utterance. When it
+ * detects that the user has paused or stopped speaking, it will return an
+ * `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
+ * more than one `StreamingRecognitionResult` with the `is_final` flag set to
+ * `true`.
+ *
+ * @property {boolean} interimResults
+ * *Optional* If `true`, interim results (tentative hypotheses) may be
+ * returned as they become available (these interim results are indicated with
+ * the `is_final=false` flag).
+ * If `false` or omitted, only `is_final=true` result(s) are returned.
+ *
+ * @class
+ * @see [google.cloud.speech.v1.StreamingRecognitionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var StreamingRecognitionConfig = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Provides information to the recognizer that specifies how to process the
+ * request.
+ *
+ * @property {number} encoding
+ * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
+ *
+ * The number should be among the values of [AudioEncoding]{@link AudioEncoding}
+ *
+ * @property {number} sampleRateHertz
+ * *Required* Sample rate in Hertz of the audio data sent in all
+ * `RecognitionAudio` messages. Valid values are: 8000-48000.
+ * 16000 is optimal. For best results, set the sampling rate of the audio
+ * source to 16000 Hz. If that's not possible, use the native sample rate of
+ * the audio source (instead of re-sampling).
+ *
+ * @property {string} languageCode
+ * *Required* The language of the supplied audio as a
+ * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+ * Example: "en-US".
+ * See [Language Support](https://cloud.google.com/speech/docs/languages)
+ * for a list of the currently supported language codes.
+ *
+ * @property {number} maxAlternatives
+ * *Optional* Maximum number of recognition hypotheses to be returned.
+ * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+ * within each `SpeechRecognitionResult`.
+ * The server may return fewer than `max_alternatives`.
+ * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
+ * one. If omitted, it will return a maximum of one.
+ *
+ * @property {boolean} profanityFilter
+ * *Optional* If set to `true`, the server will attempt to filter out
+ * profanities, replacing all but the initial character in each filtered word
+ * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+ * won't be filtered out.
+ *
+ * @property {Object[]} speechContexts
+ * *Optional* A means to provide context to assist the speech recognition.
+ *
+ * This object should have the same structure as [SpeechContext]{@link SpeechContext}
+ *
+ * @class
+ * @see [google.cloud.speech.v1.RecognitionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var RecognitionConfig = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Audio encoding of the data sent in the audio message. All encodings support
+ * only 1 channel (mono) audio. Only `FLAC` includes a header that describes
+ * the bytes of audio that follow the header. The other encodings are raw
+ * audio bytes with no header.
+ *
+ * For best results, the audio source should be captured and transmitted using
+ * a lossless encoding (`FLAC` or `LINEAR16`). Recognition accuracy may be
+ * reduced if lossy codecs, which include the other codecs listed in
+ * this section, are used to capture or transmit the audio, particularly if
+ * background noise is present.
+ *
+ * @enum {number}
+ */
+ AudioEncoding: {
+
+ /**
+ * Not specified. Will return result {@link google.rpc.Code.INVALID_ARGUMENT}.
+ */
+ ENCODING_UNSPECIFIED: 0,
+
+ /**
+ * Uncompressed 16-bit signed little-endian samples (Linear PCM).
+ */
+ LINEAR16: 1,
+
+ /**
+ * [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
+ * Codec) is the recommended encoding because it is
+ * lossless--therefore recognition is not compromised--and
+ * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
+ * encoding supports 16-bit and 24-bit samples, however, not all fields in
+ * `STREAMINFO` are supported.
+ */
+ FLAC: 2,
+
+ /**
+ * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
+ */
+ MULAW: 3,
+
+ /**
+ * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
+ */
+ AMR: 4,
+
+ /**
+ * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
+ */
+ AMR_WB: 5,
+
+ /**
+ * Opus encoded audio frames in Ogg container
+ * ([OggOpus](https://wiki.xiph.org/OggOpus)).
+ * `sample_rate_hertz` must be 16000.
+ */
+ OGG_OPUS: 6,
+
+ /**
+ * Although the use of lossy encodings is not recommended, if a very low
+ * bitrate encoding is required, `OGG_OPUS` is highly preferred over
+ * Speex encoding. The [Speex](https://speex.org/) encoding supported by
+ * Cloud Speech API has a header byte in each block, as in MIME type
+ * `audio/x-speex-with-header-byte`.
+ * It is a variant of the RTP Speex encoding defined in
+ * [RFC 5574](https://tools.ietf.org/html/rfc5574).
+ * The stream is a sequence of blocks, one block per RTP packet. Each block
+ * starts with a byte containing the length of the block, in bytes, followed
+ * by one or more frames of Speex data, padded to an integral number of
+ * bytes (octets) as specified in RFC 5574. In other words, each RTP header
+ * is replaced with a single byte containing the block length. Only Speex
+ * wideband is supported. `sample_rate_hertz` must be 16000.
+ */
+ SPEEX_WITH_HEADER_BYTE: 7
+ }
+};
+
+/**
+ * Provides "hints" to the speech recognizer to favor specific words and phrases
+ * in the results.
+ *
+ * @property {string[]} phrases
+ * *Optional* A list of strings containing words and phrases "hints" so that
+ * the speech recognition is more likely to recognize them. This can be used
+ * to improve the accuracy for specific words and phrases, for example, if
+ * specific commands are typically spoken by the user. This can also be used
+ * to add additional words to the vocabulary of the recognizer. See
+ * [usage limits](https://cloud.google.com/speech/limits#content).
+ *
+ * @class
+ * @see [google.cloud.speech.v1.SpeechContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var SpeechContext = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Contains audio data in the encoding specified in the `RecognitionConfig`.
+ * Either `content` or `uri` must be supplied. Supplying both or neither
+ * returns {@link google.rpc.Code.INVALID_ARGUMENT}. See
+ * [audio limits](https://cloud.google.com/speech/limits#content).
+ *
+ * @property {string} content
+ * The audio data bytes encoded as specified in
+ * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+ * pure binary representation, whereas JSON representations use base64.
+ *
+ * @property {string} uri
+ * URI that points to a file that contains audio data bytes as specified in
+ * `RecognitionConfig`. Currently, only Google Cloud Storage URIs are
+ * supported, which must be specified in the following format:
+ * `gs://bucket_name/object_name` (other URI formats return
+ * {@link google.rpc.Code.INVALID_ARGUMENT}). For more information, see
+ * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ *
+ * @class
+ * @see [google.cloud.speech.v1.RecognitionAudio definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var RecognitionAudio = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The only message returned to the client by the `Recognize` method. It
+ * contains the result as zero or more sequential `SpeechRecognitionResult`
+ * messages.
+ *
+ * @property {Object[]} results
+ * *Output-only* Sequential list of transcription results corresponding to
+ * sequential portions of audio.
+ *
+ * This object should have the same structure as [SpeechRecognitionResult]{@link SpeechRecognitionResult}
+ *
+ * @class
+ * @see [google.cloud.speech.v1.RecognizeResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var RecognizeResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The only message returned to the client by the `LongRunningRecognize` method.
+ * It contains the result as zero or more sequential `SpeechRecognitionResult`
+ * messages. It is included in the `result.response` field of the `Operation`
+ * returned by the `GetOperation` call of the `google::longrunning::Operations`
+ * service.
+ *
+ * @property {Object[]} results
+ * *Output-only* Sequential list of transcription results corresponding to
+ * sequential portions of audio.
+ *
+ * This object should have the same structure as [SpeechRecognitionResult]{@link SpeechRecognitionResult}
+ *
+ * @class
+ * @see [google.cloud.speech.v1.LongRunningRecognizeResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var LongRunningRecognizeResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Describes the progress of a long-running `LongRunningRecognize` call. It is
+ * included in the `metadata` field of the `Operation` returned by the
+ * `GetOperation` call of the `google::longrunning::Operations` service.
+ *
+ * @property {number} progressPercent
+ * Approximate percentage of audio processed thus far. Guaranteed to be 100
+ * when the audio is fully processed and the results are available.
+ *
+ * @property {Object} startTime
+ * Time when the request was received.
+ *
+ * This object should have the same structure as [google.protobuf.Timestamp]{@link external:"google.protobuf.Timestamp"}
+ *
+ * @property {Object} lastUpdateTime
+ * Time of the most recent processing update.
+ *
+ * This object should have the same structure as [google.protobuf.Timestamp]{@link external:"google.protobuf.Timestamp"}
+ *
+ * @class
+ * @see [google.cloud.speech.v1.LongRunningRecognizeMetadata definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var LongRunningRecognizeMetadata = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
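+
+// A minimal sketch of watching LongRunningRecognizeMetadata; it assumes
+// `operation` is the google-gax Operation returned by
+// client.longRunningRecognize(), which emits 'progress' with the decoded
+// metadata.
+function logProgress(operation) {
+  operation.on('progress', function(metadata) {
+    // progressPercent reaches 100 once the audio is fully processed.
+    console.log('Processed ' + metadata.progressPercent + '% of the audio');
+  });
+}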
+
+/**
+ * `StreamingRecognizeResponse` is the only message returned to the client by
+ * `StreamingRecognize`. A series of one or more `StreamingRecognizeResponse`
+ * messages are streamed back to the client.
+ *
+ * Here's an example of a series of eight `StreamingRecognizeResponse`s that might
+ * be returned while processing audio:
+ *
+ * 1. results { alternatives { transcript: "tube" } stability: 0.01 }
+ *
+ * 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
+ *
+ * 3. results { alternatives { transcript: "to be" } stability: 0.9 }
+ * results { alternatives { transcript: " or not to be" } stability: 0.01 }
+ *
+ * 4. results { alternatives { transcript: "to be or not to be"
+ * confidence: 0.92 }
+ * alternatives { transcript: "to bee or not to bee" }
+ * is_final: true }
+ *
+ * 5. results { alternatives { transcript: " that's" } stability: 0.01 }
+ *
+ * 6. results { alternatives { transcript: " that is" } stability: 0.9 }
+ * results { alternatives { transcript: " the question" } stability: 0.01 }
+ *
+ * 7. speech_event_type: END_OF_SINGLE_UTTERANCE
+ *
+ * 8. results { alternatives { transcript: " that is the question"
+ * confidence: 0.98 }
+ * alternatives { transcript: " that was the question" }
+ * is_final: true }
+ *
+ * Notes:
+ *
+ * - Only two of the above responses, #4 and #8, contain final results; they are
+ * indicated by `is_final: true`. Concatenating these together generates the
+ * full transcript: "to be or not to be that is the question".
+ *
+ * - The others contain interim `results`. #3 and #6 contain two interim
+ * `results`: the first portion has a high stability and is less likely to
+ * change; the second portion has a low stability and is very likely to
+ * change. A UI designer might choose to show only high stability `results`.
+ *
+ * - The specific `stability` and `confidence` values shown above are only for
+ * illustrative purposes. Actual values may vary.
+ *
+ * - In each response, only one of these fields will be set:
+ * `error`,
+ * `speech_event_type`, or
+ * one or more (repeated) `results`.
+ *
+ * @property {Object} error
+ * *Output-only* If set, returns a {@link google.rpc.Status} message that
+ * specifies the error for the operation.
+ *
+ * This object should have the same structure as [google.rpc.Status]{@link external:"google.rpc.Status"}
+ *
+ * @property {Object[]} results
+ * *Output-only* This repeated list contains zero or more results that
+ * correspond to consecutive portions of the audio currently being processed.
+ * It contains zero or one `is_final=true` result (the newly settled portion),
+ * followed by zero or more `is_final=false` results.
+ *
+ * This object should have the same structure as [StreamingRecognitionResult]{@link StreamingRecognitionResult}
+ *
+ * @property {number} speechEventType
+ * *Output-only* Indicates the type of speech event.
+ *
+ * The number should be among the values of [SpeechEventType]{@link SpeechEventType}
+ *
+ * @class
+ * @see [google.cloud.speech.v1.StreamingRecognizeResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var StreamingRecognizeResponse = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+
+ /**
+ * Indicates the type of speech event.
+ *
+ * @enum {number}
+ */
+ SpeechEventType: {
+
+ /**
+ * No speech event specified.
+ */
+ SPEECH_EVENT_UNSPECIFIED: 0,
+
+ /**
+ * This event indicates that the server has detected the end of the user's
+ * speech utterance and expects no additional speech. Therefore, the server
+ * will not process additional audio (although it may subsequently return
+ * additional results). The client should stop sending additional audio
+ * data, half-close the gRPC connection, and wait for any additional results
+ * until the server closes the gRPC connection. This event is only sent if
+ * `single_utterance` was set to `true`, and is not used otherwise.
+ */
+ END_OF_SINGLE_UTTERANCE: 1
+ }
+};
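+
+// A minimal sketch that mirrors the notes above: only `isFinal` results are
+// kept and concatenated into the full transcript. It assumes `stream` is the
+// duplex stream returned by client.streamingRecognize().
+function collectTranscript(stream, callback) {
+  var transcript = '';
+  stream.on('data', function(response) {
+    (response.results || []).forEach(function(result) {
+      if (result.isFinal) {
+        // e.g. "to be or not to be" + " that is the question"
+        transcript += result.alternatives[0].transcript;
+      }
+    });
+  });
+  stream.on('error', callback);
+  stream.on('end', function() {
+    callback(null, transcript);
+  });
+}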
+
+/**
+ * A streaming speech recognition result corresponding to a portion of the audio
+ * that is currently being processed.
+ *
+ * @property {Object[]} alternatives
+ * *Output-only* May contain one or more recognition hypotheses (up to the
+ * maximum specified in `max_alternatives`).
+ *
+ * This object should have the same structure as [SpeechRecognitionAlternative]{@link SpeechRecognitionAlternative}
+ *
+ * @property {boolean} isFinal
+ * *Output-only* If `false`, this `StreamingRecognitionResult` represents an
+ * interim result that may change. If `true`, this is the final time the
+ *   speech service will return this particular `StreamingRecognitionResult`;
+ * the recognizer will not return any further hypotheses for this portion of
+ * the transcript and corresponding audio.
+ *
+ * @property {number} stability
+ * *Output-only* An estimate of the likelihood that the recognizer will not
+ * change its guess about this interim result. Values range from 0.0
+ * (completely unstable) to 1.0 (completely stable).
+ * This field is only provided for interim results (`is_final=false`).
+ * The default of 0.0 is a sentinel value indicating `stability` was not set.
+ *
+ * @class
+ * @see [google.cloud.speech.v1.StreamingRecognitionResult definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var StreamingRecognitionResult = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
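+
+// A minimal sketch of the "show only high-stability results" strategy
+// mentioned above; the 0.8 threshold is an arbitrary illustrative value.
+function stableInterimResults(results) {
+  return results.filter(function(result) {
+    return !result.isFinal && result.stability >= 0.8;
+  });
+}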
+
+/**
+ * A speech recognition result corresponding to a portion of the audio.
+ *
+ * @property {Object[]} alternatives
+ * *Output-only* May contain one or more recognition hypotheses (up to the
+ * maximum specified in `max_alternatives`).
+ *
+ * This object should have the same structure as [SpeechRecognitionAlternative]{@link SpeechRecognitionAlternative}
+ *
+ * @class
+ * @see [google.cloud.speech.v1.SpeechRecognitionResult definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var SpeechRecognitionResult = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Alternative hypotheses (a.k.a. n-best list).
+ *
+ * @property {string} transcript
+ * *Output-only* Transcript text representing the words that the user spoke.
+ *
+ * @property {number} confidence
+ * *Output-only* The confidence estimate between 0.0 and 1.0. A higher number
+ * indicates an estimated greater likelihood that the recognized words are
+ * correct. This field is typically provided only for the top hypothesis, and
+ * only for `is_final=true` results. Clients should not rely on the
+ * `confidence` field as it is not guaranteed to be accurate, or even set, in
+ * any of the results.
+ * The default of 0.0 is a sentinel value indicating `confidence` was not set.
+ *
+ * @class
+ * @see [google.cloud.speech.v1.SpeechRecognitionAlternative definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
+ */
+var SpeechRecognitionAlternative = {
+ // This is for documentation. Actual contents will be loaded by gRPC.
+};
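+
+// A minimal sketch of picking from the n-best list; per the note above,
+// `confidence` may be absent, so it is only reported when present.
+function bestTranscript(result) {
+  var top = result.alternatives[0];
+  if (top.confidence) {
+    console.log('Confidence: ' + top.confidence);
+  }
+  return top.transcript;
+}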
\ No newline at end of file
diff --git a/packages/speech/src/v1/doc/doc_google_protobuf_any.js b/packages/speech/src/v1/doc/doc_google_protobuf_any.js
new file mode 100644
index 00000000000..0697ec15814
--- /dev/null
+++ b/packages/speech/src/v1/doc/doc_google_protobuf_any.js
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2017, Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Note: this file is purely for documentation. Any contents are not expected
+ * to be loaded as the JS file.
+ */
+
+/**
+ * `Any` contains an arbitrary serialized protocol buffer message along with a
+ * URL that describes the type of the serialized message.
+ *
+ * Protobuf library provides support to pack/unpack Any values in the form
+ * of utility functions or additional generated methods of the Any type.
+ *
+ * Example 1: Pack and unpack a message in C++.
+ *
+ * Foo foo = ...;
+ * Any any;
+ * any.PackFrom(foo);
+ * ...
+ * if (any.UnpackTo(&foo)) {
+ * ...
+ * }
+ *
+ * Example 2: Pack and unpack a message in Java.
+ *
+ * Foo foo = ...;
+ * Any any = Any.pack(foo);
+ * ...
+ * if (any.is(Foo.class)) {
+ * foo = any.unpack(Foo.class);
+ * }
+ *
+ * Example 3: Pack and unpack a message in Python.
+ *
+ * foo = Foo(...)
+ * any = Any()
+ * any.Pack(foo)
+ * ...
+ * if any.Is(Foo.DESCRIPTOR):
+ * any.Unpack(foo)
+ * ...
+ *
+ * The pack methods provided by the protobuf library will by default use
+ * 'type.googleapis.com/full.type.name' as the type URL, and the unpack
+ * methods only use the fully qualified type name after the last '/'
+ * in the type URL; for example, "foo.bar.com/x/y.z" will yield the type
+ * name "y.z".
+ *
+ *
+ * # JSON
+ *
+ * The JSON representation of an `Any` value uses the regular
+ * representation of the deserialized, embedded message, with an
+ * additional field `@type` which contains the type URL. Example:
+ *
+ * package google.profile;
+ * message Person {
+ * string first_name = 1;
+ * string last_name = 2;
+ * }
+ *
+ * {
+ * "@type": "type.googleapis.com/google.profile.Person",
+ *       "firstName": <string>,
+ *       "lastName": <string>
+ * }
+ *
+ * If the embedded message type is well-known and has a custom JSON
+ * representation, that representation will be embedded adding a field
+ * `value` which holds the custom JSON in addition to the `@type`
+ * field. Example (for message {@link google.protobuf.Duration}):
+ *
+ * {
+ * "@type": "type.googleapis.com/google.protobuf.Duration",
+ * "value": "1.212s"
+ * }
+ *
+ * @external "google.protobuf.Any"
+ * @property {string} typeUrl
+ * A URL/resource name whose content describes the type of the
+ * serialized protocol buffer message.
+ *
+ * For URLs which use the scheme `http`, `https`, or no scheme, the
+ * following restrictions and interpretations apply:
+ *
+ * * If no scheme is provided, `https` is assumed.
+ * * The last segment of the URL's path must represent the fully
+ * qualified name of the type (as in `path/google.protobuf.Duration`).
+ * The name should be in a canonical form (e.g., leading "." is
+ * not accepted).
+ * * An HTTP GET on the URL must yield a {@link google.protobuf.Type}
+ * value in binary format, or produce an error.
+ * * Applications are allowed to cache lookup results based on the
+ * URL, or have them precompiled into a binary to avoid any
+ * lookup. Therefore, binary compatibility needs to be preserved
+ * on changes to types. (Use versioned type names to manage
+ * breaking changes.)
+ *
+ * Schemes other than `http`, `https` (or the empty scheme) might be
+ *   used with implementation-specific semantics.
+ *
+ * @property {string} value
+ * Must be a valid serialized protocol buffer of the above specified type.
+ *
+ * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto}
+ */
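+
+// A minimal sketch of checking the type of an Any value as it surfaces in
+// JavaScript (for instance inside google.rpc.Status `details`); the suffix
+// comparison follows the "name after the last '/'" rule described above, and
+// 'google.rpc.BadRequest' is just an example type name.
+function anyIsOfType(anyMessage, fullTypeName) {
+  return anyMessage.typeUrl.split('/').pop() === fullTypeName;
+}
+// anyIsOfType(detail, 'google.rpc.BadRequest');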
\ No newline at end of file
diff --git a/packages/speech/src/v1/doc/doc_google_rpc_status.js b/packages/speech/src/v1/doc/doc_google_rpc_status.js
new file mode 100644
index 00000000000..c85f1befe90
--- /dev/null
+++ b/packages/speech/src/v1/doc/doc_google_rpc_status.js
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2017, Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Note: this file is purely for documentation. Any contents are not expected
+ * to be loaded as the JS file.
+ */
+
+/**
+ * The `Status` type defines a logical error model that is suitable for different
+ * programming environments, including REST APIs and RPC APIs. It is used by
+ * [gRPC](https://github.com/grpc). The error model is designed to be:
+ *
+ * - Simple to use and understand for most users
+ * - Flexible enough to meet unexpected needs
+ *
+ * # Overview
+ *
+ * The `Status` message contains three pieces of data: error code, error message,
+ * and error details. The error code should be an enum value of
+ * {@link google.rpc.Code}, but it may accept additional error codes if needed. The
+ * error message should be a developer-facing English message that helps
+ * developers *understand* and *resolve* the error. If a localized user-facing
+ * error message is needed, put the localized message in the error details or
+ * localize it in the client. The optional error details may contain arbitrary
+ * information about the error. There is a predefined set of error detail types
+ * in the package `google.rpc` which can be used for common error conditions.
+ *
+ * # Language mapping
+ *
+ * The `Status` message is the logical representation of the error model, but it
+ * is not necessarily the actual wire format. When the `Status` message is
+ * exposed in different client libraries and different wire protocols, it can be
+ * mapped differently. For example, it will likely be mapped to exceptions
+ * in Java, but to error codes in C.
+ *
+ * # Other uses
+ *
+ * The error model and the `Status` message can be used in a variety of
+ * environments, either with or without APIs, to provide a
+ * consistent developer experience across different environments.
+ *
+ * Example uses of this error model include:
+ *
+ * - Partial errors. If a service needs to return partial errors to the client,
+ * it may embed the `Status` in the normal response to indicate the partial
+ * errors.
+ *
+ * - Workflow errors. A typical workflow has multiple steps. Each step may
+ * have a `Status` message for error reporting purpose.
+ *
+ * - Batch operations. If a client uses batch requests and batch responses, the
+ *     `Status` message should be used directly inside the batch response, one for
+ *     each error sub-response.
+ *
+ * - Asynchronous operations. If an API call embeds asynchronous operation
+ * results in its response, the status of those operations should be
+ * represented directly using the `Status` message.
+ *
+ * - Logging. If some API errors are stored in logs, the message `Status` could
+ * be used directly after any stripping needed for security/privacy reasons.
+ *
+ * @external "google.rpc.Status"
+ * @property {number} code
+ * The status code, which should be an enum value of {@link google.rpc.Code}.
+ *
+ * @property {string} message
+ * A developer-facing error message, which should be in English. Any
+ * user-facing error message should be localized and sent in the
+ * {@link google.rpc.Status.details} field, or localized by the client.
+ *
+ * @property {Object[]} details
+ * A list of messages that carry the error details. There will be a
+ * common set of message types for APIs to use.
+ *
+ * This object should have the same structure as [google.protobuf.Any]{@link external:"google.protobuf.Any"}
+ *
+ * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto}
+ */
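+
+// A minimal sketch of reporting a Status-shaped error; the comparison against
+// 3 (INVALID_ARGUMENT in google.rpc.Code) is shown only as an example.
+function describeStatus(err) {
+  if (err.code === 3) {
+    console.error('Invalid argument: ' + err.message);
+  } else {
+    console.error('RPC failed with code ' + err.code + ': ' + err.message);
+  }
+  (err.details || []).forEach(function(detail) {
+    // Each detail is a google.protobuf.Any value.
+    console.error('Detail type: ' + detail.typeUrl);
+  });
+}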
\ No newline at end of file
diff --git a/packages/speech/src/v1/index.js b/packages/speech/src/v1/index.js
index 65030f74032..0abd7e3dada 100644
--- a/packages/speech/src/v1/index.js
+++ b/packages/speech/src/v1/index.js
@@ -1,11 +1,11 @@
/*
- * Copyright 2016 Google Inc. All rights reserved.
+ * Copyright 2017, Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -27,7 +27,8 @@ function v1(options) {
return speechClient(gaxGrpc);
}
+v1.GAPIC_VERSION = '0.7.1';
v1.SERVICE_ADDRESS = speechClient.SERVICE_ADDRESS;
v1.ALL_SCOPES = speechClient.ALL_SCOPES;
-module.exports = v1;
+module.exports = v1;
\ No newline at end of file
diff --git a/packages/speech/src/v1/speech_client.js b/packages/speech/src/v1/speech_client.js
index f6975908973..bb3096b41ef 100644
--- a/packages/speech/src/v1/speech_client.js
+++ b/packages/speech/src/v1/speech_client.js
@@ -52,15 +52,6 @@ var ALL_SCOPES = [
/**
* Service that implements Google Cloud Speech API.
*
- * This will be created through a builder function which can be obtained by the module.
- * See the following example of how to initialize the module and how to access to the builder.
- * @see {@link speechClient}
- *
- * @example
- * var speechV1 = require('@google-cloud/speech').v1({
- * // optional auth parameters.
- * });
- * var client = speechV1.speechClient();
*
* @class
*/
@@ -165,8 +156,13 @@ SpeechClient.prototype.getProjectId = function(callback) {
*
* @example
*
- * var client = speechV1.speechClient();
- * var encoding = speechV1.RecognitionConfig.AudioEncoding.FLAC;
+ * var speech = require('@google-cloud/speech');
+ *
+ * var client = speech.v1({
+ * // optional auth parameters.
+ * });
+ *
+ * var encoding = speech.v1.types.RecognitionConfig.AudioEncoding.FLAC;
* var sampleRateHertz = 44100;
* var languageCode = 'en-US';
* var config = {
@@ -185,7 +181,8 @@ SpeechClient.prototype.getProjectId = function(callback) {
* client.recognize(request).then(function(responses) {
* var response = responses[0];
* // doThingsWith(response)
- * }).catch(function(err) {
+ * })
+ * .catch(function(err) {
* console.error(err);
* });
*/
@@ -231,8 +228,13 @@ SpeechClient.prototype.recognize = function(request, options, callback) {
*
* @example
*
- * var client = speechV1.speechClient();
- * var encoding = speechV1.RecognitionConfig.AudioEncoding.FLAC;
+ * var speech = require('@google-cloud/speech');
+ *
+ * var client = speech.v1({
+ * // optional auth parameters.
+ * });
+ *
+ * var encoding = speech.v1.types.RecognitionConfig.AudioEncoding.FLAC;
* var sampleRateHertz = 44100;
* var languageCode = 'en-US';
* var config = {
@@ -265,7 +267,8 @@ SpeechClient.prototype.recognize = function(request, options, callback) {
*
* // The response of the api call returning the complete operation.
* var finalApiResponse = responses[2];
- * }).catch(function(err) {
+ * })
+ * .catch(function(err) {
* console.error(err);
* });
*
@@ -290,7 +293,8 @@ SpeechClient.prototype.recognize = function(request, options, callback) {
* operation.on('error', function(err) {
* // throw(err);
* })
- * }).catch(function(err) {
+ * })
+ * .catch(function(err) {
* console.error(err);
* });
*/
@@ -320,9 +324,14 @@ SpeechClient.prototype.longRunningRecognize = function(request, options, callbac
*
* @example
*
- * var client = speechV1.speechClient();
+ * var speech = require('@google-cloud/speech');
+ *
+ * var client = speech.v1({
+ * // optional auth parameters.
+ * });
+ *
* var stream = client.streamingRecognize().on('data', function(response) {
- * // doThingsWith(response);
+ * // doThingsWith(response)
* });
* var request = {};
* // Write request objects.
@@ -369,4 +378,4 @@ function SpeechClientBuilder(gaxGrpc) {
}
module.exports = SpeechClientBuilder;
module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS;
-module.exports.ALL_SCOPES = ALL_SCOPES;
\ No newline at end of file
+module.exports.ALL_SCOPES = ALL_SCOPES;
diff --git a/packages/speech/system-test/data/bridge.raw b/packages/speech/system-test/data/bridge.raw
deleted file mode 100644
index 5ebf79d3c9c..00000000000
Binary files a/packages/speech/system-test/data/bridge.raw and /dev/null differ
diff --git a/packages/speech/system-test/data/quit.raw b/packages/speech/system-test/data/quit.raw
deleted file mode 100644
index a01dfc45a59..00000000000
Binary files a/packages/speech/system-test/data/quit.raw and /dev/null differ
diff --git a/packages/speech/system-test/data/spain.raw b/packages/speech/system-test/data/spain.raw
deleted file mode 100644
index 35413b78817..00000000000
Binary files a/packages/speech/system-test/data/spain.raw and /dev/null differ
diff --git a/packages/speech/system-test/speech.js b/packages/speech/system-test/speech.js
deleted file mode 100644
index f5856aa602a..00000000000
--- a/packages/speech/system-test/speech.js
+++ /dev/null
@@ -1,328 +0,0 @@
-/*!
- * Copyright 2016 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-'use strict';
-
-var assert = require('assert');
-var async = require('async');
-var exec = require('methmeth');
-var extend = require('extend');
-var fs = require('fs');
-var path = require('path');
-var uuid = require('uuid');
-
-var env = require('../../../system-test/env.js');
-var Speech = require('../');
-var storage = require('@google-cloud/storage')(env);
-
-var FILENAMES = ['bridge', 'spain', 'quit'];
-var AUDIO_FILES = {};
-var BUCKET_NAME = 'gcloud-test-bucket-temp-' + uuid.v1();
-
-FILENAMES.forEach(function(filename) {
- var name = filename + '.raw';
-
- AUDIO_FILES[filename] = {
- name: name,
- path: path.join(__dirname, 'data/' + name),
- gcsUri: 'gs://' + BUCKET_NAME + '/' + name,
- httpUri: 'https://storage.googleapis.com/' + BUCKET_NAME + '/' + name
- };
-});
-
-describe('Speech', function() {
- var speech = new Speech(env);
- var bucket = storage.bucket(BUCKET_NAME);
-
- var OPTIONS = {
- encoding: 'LINEAR16',
- languageCode: 'en-US',
- sampleRateHertz: 16000
- };
-
- var OPTIONS_VERBOSE = extend({}, OPTIONS, {
- verbose: true
- });
-
- var TRANSCRIPTION = 'how old is the Brooklyn Bridge';
-
- before(function(done) {
- async.waterfall([
- function(next) {
- bucket.create(next);
- },
-
- function(_, apiResponse, next) {
- async.map(FILENAMES, function(filename, onComplete) {
- fs.readFile(AUDIO_FILES[filename].path, onComplete);
- }, next);
- },
-
- function(files, next) {
- FILENAMES.forEach(function(filename, i) {
- AUDIO_FILES[filename].content = files[i];
- });
-
- async.map(FILENAMES, function(filename, onComplete) {
- var file = bucket.file(AUDIO_FILES[filename].name);
-
- file.save(AUDIO_FILES[filename].content, function(err) {
- onComplete(err, file);
- });
- }, next);
- },
-
- function(files, next) {
- async.map(files, exec('makePublic'), next);
- }
- ], done);
- });
-
- after(function(done) {
- bucket.deleteFiles({
- force: true
- }, function(err) {
- if (err) {
- done(err);
- return;
- }
-
- bucket.delete(done);
- });
- });
-
- describe('recognize', function() {
- it('recognizes speech from raw audio', function(done) {
- fs.readFile(AUDIO_FILES.bridge.path, function(err, audioFile) {
- assert.ifError(err);
-
- speech.recognize({
- content: audioFile,
- }, OPTIONS, assertSimplifiedResponse(done));
- });
- });
-
- it('recognizes speech in verbose mode', function(done) {
- fs.readFile(AUDIO_FILES.bridge.path, function(err, audioFile) {
- assert.ifError(err);
-
- speech.recognize({
- content: audioFile,
- }, OPTIONS_VERBOSE, assertVerboseResponse(done));
- });
- });
-
- it('recognizes speech from local file', function(done) {
- speech.recognize(AUDIO_FILES.bridge.path, {
- // encoding should be automatically detected
- languageCode: 'en-US',
- sampleRateHertz: 16000
- }, assertSimplifiedResponse(done));
- });
-
- it('recognizes speech from remote GCS audio file', function(done) {
- var uri = AUDIO_FILES.bridge.gcsUri;
-
- speech.recognize(uri, OPTIONS, assertSimplifiedResponse(done));
- });
-
- it('recognizes speech from remote audio file', function(done) {
- var uri = AUDIO_FILES.bridge.httpUri;
-
- speech.recognize(uri, OPTIONS, assertSimplifiedResponse(done));
- });
- });
-
- describe('startRecognition', function() {
- it('recognizes speech from raw audio', function(done) {
- fs.readFile(AUDIO_FILES.bridge.path, function(err, audioFile) {
- assert.ifError(err);
-
- speech.startRecognition({
- content: audioFile
- }, OPTIONS, function(err, operation) {
- assert.ifError(err);
-
- operation
- .on('error', done)
- .on('complete', assertSimplifiedResponseOperation(done));
- });
- });
- });
-
- it('recognizes speech from raw audio in verbose mode', function(done) {
- fs.readFile(AUDIO_FILES.bridge.path, function(err, audioFile) {
- assert.ifError(err);
-
- speech.startRecognition({
- content: audioFile
- }, OPTIONS_VERBOSE, function(err, operation) {
- assert.ifError(err);
-
- operation
- .on('error', done)
- .on('complete', assertVerboseResponseOperation(done));
- });
- });
- });
-
- it('recognizes speech from local file', function(done) {
- var options = {
- // encoding should be automatically detected
- languageCode: 'en-US',
- sampleRateHertz: 16000
- };
-
- var path = AUDIO_FILES.bridge.path;
-
- speech.startRecognition(path, options, function(err, operation) {
- assert.ifError(err);
-
- operation
- .on('error', done)
- .on('complete', assertSimplifiedResponseOperation(done));
- });
- });
-
- it('recognizes speech from remote GCS audio file', function(done) {
- var uri = AUDIO_FILES.bridge.gcsUri;
-
- speech.startRecognition(uri, OPTIONS, function(err, operation) {
- assert.ifError(err);
-
- operation
- .on('error', done)
- .on('complete', assertSimplifiedResponseOperation(done));
- });
- });
-
- it('recognizes speech from remote audio file', function(done) {
- var uri = AUDIO_FILES.bridge.httpUri;
-
- speech.startRecognition(uri, OPTIONS, function(err, operation) {
- assert.ifError(err);
-
- operation
- .on('error', done)
- .on('complete', assertSimplifiedResponseOperation(done));
- });
- });
-
- it('runs operation as a promise', function() {
- var uri = AUDIO_FILES.bridge.httpUri;
-
- return speech.startRecognition(uri, OPTIONS)
- .then(function(response) {
- var operation = response[0];
- return operation.promise();
- });
- });
- });
-
- describe('createRecognizeStream', function() {
- it('recognizes speech from raw audio', function(done) {
- var transcribed = false;
- var responseEmitted = false;
-
- fs.createReadStream(AUDIO_FILES.bridge.path)
- .on('error', done)
- .pipe(speech.createRecognizeStream({
- config: OPTIONS,
- interimResults: false,
- singleUtterance: false
- }))
- .on('error', done)
- .on('response', function() {
- responseEmitted = true;
- })
- .on('data', function(data) {
- if (data.speechEventType === 'SPEECH_EVENT_UNSPECIFIED') {
- if (data.results === TRANSCRIPTION) {
- transcribed = true;
- }
- }
- })
- .on('end', function() {
- setTimeout(function() {
- assert.strictEqual(responseEmitted, true);
- assert.strictEqual(transcribed, true);
- done();
- }, 1500);
- });
- });
-
- it('recognizes speech from raw audio in verbose mode', function(done) {
- var transcribed = false;
- var responseEmitted = false;
-
- fs.createReadStream(AUDIO_FILES.bridge.path)
- .on('error', done)
- .pipe(speech.createRecognizeStream({
- config: OPTIONS,
- interimResults: false,
- singleUtterance: false,
- verbose: true
- }))
- .on('error', done)
- .on('response', function() {
- responseEmitted = true;
- })
- .on('data', function(data) {
- if (data.speechEventType === 'SPEECH_EVENT_UNSPECIFIED') {
- if (data.results[0].transcript === TRANSCRIPTION) {
- transcribed = true;
- }
- }
- })
- .on('end', function() {
- setTimeout(function() {
- assert.strictEqual(responseEmitted, true);
- assert.strictEqual(transcribed, true);
- done();
- }, 1500);
- });
- });
- });
-
- function assertSimplifiedResponse(done) {
- return function(err, transcript) {
- assert.ifError(err);
- assert.strictEqual(transcript, TRANSCRIPTION);
- done();
- };
- }
-
- function assertVerboseResponse(done) {
- return function(err, results) {
- assert.ifError(err);
-
- assert(results.length > 0);
-
- var transcript = results[0].transcript;
- assert.strictEqual(transcript, TRANSCRIPTION);
-
- done();
- };
- }
-
- function assertSimplifiedResponseOperation(done) {
- return assertSimplifiedResponse(done).bind(null, null);
- }
-
- function assertVerboseResponseOperation(done) {
- return assertVerboseResponse(done).bind(null, null);
- }
-});
diff --git a/packages/speech/test/v1/v1.js b/packages/speech/test/gapic-v1.test.js
similarity index 77%
rename from packages/speech/test/v1/v1.js
rename to packages/speech/test/gapic-v1.test.js
index 3da63317a9e..2e37b7ff538 100644
--- a/packages/speech/test/v1/v1.js
+++ b/packages/speech/test/gapic-v1.test.js
@@ -1,11 +1,11 @@
/*
- * Copyright 2016 Google Inc. All rights reserved.
+ * Copyright 2017, Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+'use strict';
var assert = require('assert');
var speechV1 = require('../src/v1')();
@@ -27,8 +28,18 @@ describe('SpeechClient', function() {
it('invokes recognize without error', function(done) {
var client = speechV1.speechClient();
// Mock request
- var config = {};
- var audio = {};
+ var encoding = speechV1.RecognitionConfig.AudioEncoding.FLAC;
+ var sampleRateHertz = 44100;
+ var languageCode = 'en-US';
+ var config = {
+ encoding : encoding,
+ sampleRateHertz : sampleRateHertz,
+ languageCode : languageCode
+ };
+ var uri = 'gs://bucket_name/file_name.flac';
+ var audio = {
+ uri : uri
+ };
var request = {
config : config,
audio : audio
@@ -50,8 +61,18 @@ describe('SpeechClient', function() {
it('invokes recognize with error', function(done) {
var client = speechV1.speechClient();
// Mock request
- var config = {};
- var audio = {};
+ var encoding = speechV1.RecognitionConfig.AudioEncoding.FLAC;
+ var sampleRateHertz = 44100;
+ var languageCode = 'en-US';
+ var config = {
+ encoding : encoding,
+ sampleRateHertz : sampleRateHertz,
+ languageCode : languageCode
+ };
+ var uri = 'gs://bucket_name/file_name.flac';
+ var audio = {
+ uri : uri
+ };
var request = {
config : config,
audio : audio
@@ -72,8 +93,18 @@ describe('SpeechClient', function() {
it('invokes longRunningRecognize without error', function(done) {
var client = speechV1.speechClient();
// Mock request
- var config = {};
- var audio = {};
+ var encoding = speechV1.RecognitionConfig.AudioEncoding.FLAC;
+ var sampleRateHertz = 44100;
+ var languageCode = 'en-US';
+ var config = {
+ encoding : encoding,
+ sampleRateHertz : sampleRateHertz,
+ languageCode : languageCode
+ };
+ var uri = 'gs://bucket_name/file_name.flac';
+ var audio = {
+ uri : uri
+ };
var request = {
config : config,
audio : audio
@@ -99,8 +130,18 @@ describe('SpeechClient', function() {
it('invokes longRunningRecognize with error', function(done) {
var client = speechV1.speechClient();
// Mock request
- var config = {};
- var audio = {};
+ var encoding = speechV1.RecognitionConfig.AudioEncoding.FLAC;
+ var sampleRateHertz = 44100;
+ var languageCode = 'en-US';
+ var config = {
+ encoding : encoding,
+ sampleRateHertz : sampleRateHertz,
+ languageCode : languageCode
+ };
+ var uri = 'gs://bucket_name/file_name.flac';
+ var audio = {
+ uri : uri
+ };
var request = {
config : config,
audio : audio
@@ -200,7 +241,7 @@ function mockLongRunningGrpcMethod(expectedRequest, response, error) {
promise: function() {
return new Promise(function(resolve, reject) {
if (error) {
- reject(error)
+ reject(error);
} else {
resolve([response]);
}
diff --git a/packages/speech/test/helpers.test.js b/packages/speech/test/helpers.test.js
new file mode 100644
index 00000000000..7064630d557
--- /dev/null
+++ b/packages/speech/test/helpers.test.js
@@ -0,0 +1,54 @@
+/*!
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+var assert = require('assert');
+var sinon = require('sinon');
+var stream = require('stream');
+
+var Speech = require('../');
+
+
+describe('Speech helper methods', () => {
+ var sandbox = sinon.sandbox.create();
+
+ afterEach(() => {
+ sandbox.restore();
+ });
+
+ describe('streamingRecognize', () => {
+ it('writes the config to the resulting stream', () => {
+ var speech = Speech.v1();
+
+ // Stub the underlying _streamingRecognize method to just return
+ // a bogus stream.
+ var writable = stream.Writable();
+ var sr = sandbox.stub(speech, '_streamingRecognize').returns(writable);
+
+      // Call the new helper method with a streaming config and call options.
+ var config = {config: {languageCode: 'en-us'}};
+ var options = {timeout: Infinity};
+ speech.streamingRecognize(config, options);
+
+ // Establish that the underlying streamingRecognize was called with
+ // the options.
+ assert(sr.calledOnce);
+ assert(sr.calledWithExactly(options));
+ });
+ });
+});
diff --git a/packages/speech/test/index.js b/packages/speech/test/index.js
deleted file mode 100644
index f89c5cd4421..00000000000
--- a/packages/speech/test/index.js
+++ /dev/null
@@ -1,1218 +0,0 @@
-/**
- * Copyright 2016 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-'use strict';
-
-var assert = require('assert');
-var async = require('async');
-var extend = require('extend');
-var fs = require('fs');
-var googleProtoFiles = require('google-proto-files');
-var proxyquire = require('proxyquire');
-var through = require('through2');
-var tmp = require('tmp');
-
-var util = require('@google-cloud/common').util;
-
-var promisified = false;
-var fakeUtil = extend({}, util, {
- promisifyAll: function(Class, options) {
- if (Class.name !== 'Speech') {
- return;
- }
-
- promisified = true;
- assert.deepEqual(options.exclude, ['operation']);
- }
-});
-
-function FakeGrpcOperation() {
- this.calledWith_ = arguments;
-}
-
-function FakeGrpcService() {
- this.calledWith_ = arguments;
-}
-
-var fakeV1Override;
-function fakeV1() {
- if (fakeV1Override) {
- return fakeV1Override.apply(null, arguments);
- }
-
- return {
- speechClient: util.noop
- };
-}
-
-var requestOverride = null;
-var fakeRequest = function() {
- return (requestOverride || util.noop).apply(this, arguments);
-};
-
-describe('Speech', function() {
- var OPTIONS = {
- projectId: 'project-id'
- };
-
- var Speech;
- var speech;
-
- var originalStaticMembers;
-
- before(function() {
- Speech = proxyquire('../', {
- '@google-cloud/common': {
- util: fakeUtil
- },
- '@google-cloud/common-grpc': {
- Operation: FakeGrpcOperation,
- Service: FakeGrpcService
- },
- request: fakeRequest,
- './v1': fakeV1
- });
-
- originalStaticMembers = Object.keys(Speech).reduce(function(statics, key) {
- statics[key] = Speech[key];
- return statics;
- }, {});
- });
-
- beforeEach(function() {
- fakeV1Override = null;
- requestOverride = null;
-
- speech = new Speech(OPTIONS);
-
- extend(Speech, originalStaticMembers);
- });
-
- describe('instantiation', function() {
- it('should promisify all the things', function() {
- assert(promisified);
- });
-
- it('should normalize the arguments', function() {
- var normalizeArguments = fakeUtil.normalizeArguments;
- var normalizeArgumentsCalled = false;
- var fakeOptions = { projectId: OPTIONS.projectId };
- var fakeContext = {};
-
- fakeUtil.normalizeArguments = function(context, options) {
- normalizeArgumentsCalled = true;
- assert.strictEqual(context, fakeContext);
- assert.strictEqual(options, fakeOptions);
- return options;
- };
-
- Speech.call(fakeContext, fakeOptions);
- assert(normalizeArgumentsCalled);
-
- fakeUtil.normalizeArguments = normalizeArguments;
- });
-
- it('should create a gax api client', function() {
- var expectedSpeechService = {};
-
- fakeV1Override = function(options) {
- var expected = extend({}, OPTIONS, {
- libName: 'gccl',
- libVersion: require('../package.json').version
- });
- assert.deepStrictEqual(options, expected);
-
- return {
- speechClient: function(options) {
- assert.deepStrictEqual(options, expected);
- return expectedSpeechService;
- }
- };
- };
-
- var speech = new Speech(OPTIONS);
-
- assert.deepEqual(speech.api, {
- Speech: expectedSpeechService
- });
- });
-
- it('should inherit from GrpcService', function() {
- assert(speech instanceof FakeGrpcService);
-
- var calledWith = speech.calledWith_[0];
-
- assert.deepEqual(calledWith, {
- baseUrl: 'speech.googleapis.com',
- projectIdRequired: false,
- service: 'speech',
- protoServices: {
- Operations: {
- path: googleProtoFiles('longrunning', 'operations.proto'),
- service: 'longrunning'
- }
- },
- scopes: [
- 'https://www.googleapis.com/auth/cloud-platform'
- ],
- packageJson: require('../package.json')
- });
- });
- });
-
- describe('eventTypes', function() {
- var EVENT_TYPES = {
- END_OF_SINGLE_UTTERANCE: 'END_OF_SINGLE_UTTERANCE',
- ENDPOINTER_EVENT_UNSPECIFIED: 'ENDPOINTER_EVENT_UNSPECIFIED'
- };
-
- it('should export static eventTypes', function() {
- assert.deepEqual(Speech.eventTypes, EVENT_TYPES);
- });
-
- it('should export instance eventTypes', function() {
- assert.deepEqual(speech.eventTypes, EVENT_TYPES);
- });
- });
-
- describe('detectEncoding_', function() {
- it('should detect encoding', function() {
- assert.equal(Speech.detectEncoding_('foo.raw'), 'LINEAR16');
- assert.equal(Speech.detectEncoding_('foo.amr'), 'AMR');
- assert.equal(Speech.detectEncoding_('foo.awb'), 'AMR_WB');
- assert.equal(Speech.detectEncoding_('foo.flac'), 'FLAC');
- assert.equal(Speech.detectEncoding_('foo.fLAc'), 'FLAC');
- assert.equal(Speech.detectEncoding_('foo.wav'), 'MULAW');
- assert.equal(Speech.detectEncoding_('foo.au'), 'MULAW');
- });
-
- it('should throw if a supported encoding is not detected', function() {
- assert.throws(function() {
- Speech.detectEncoding_('blah.mp3');
- }, /Encoding could not be determined for file: blah\.mp3/);
- });
-
- it('should return nothing if the argument is not a string', function() {
- assert.strictEqual(Speech.detectEncoding_({}), undefined);
- });
- });
-
- describe('findFile_', function() {
- it('should return buffer for snippet sandbox', function(done) {
- global.GCLOUD_SANDBOX_ENV = true;
-
- Speech.findFile_({}, function(err, foundFile) {
- delete global.GCLOUD_SANDBOX_ENV;
- assert.ifError(err);
-
- assert.deepEqual(foundFile, {
- content: new Buffer('')
- });
-
- done();
- });
- });
-
- it('should convert a File object', function(done) {
- var file = {
- bucket: {
- name: 'bucket-name'
- },
- name: 'file-name'
- };
-
- var isCustomTypeCalled = false;
- var isCustomType = fakeUtil.isCustomType;
-
- fakeUtil.isCustomType = function(obj, module) {
- isCustomTypeCalled = true;
- fakeUtil.isCustomType = isCustomType;
- assert.strictEqual(obj, file);
- assert.strictEqual(module, 'storage/file');
- return true;
- };
-
- Speech.findFile_(file, function(err, foundFile) {
- assert.ifError(err);
-
- assert.deepEqual(foundFile, {
- uri: 'gs://' + file.bucket.name + '/' + file.name
- });
-
- assert.strictEqual(isCustomTypeCalled, true);
-
- done();
- });
- });
-
- it('should detect a gs:// path', function(done) {
- var file = 'gs://your-bucket-name/audio.raw';
-
- Speech.findFile_(file, function(err, foundFile) {
- assert.ifError(err);
-
- assert.deepEqual(foundFile, {
- uri: file
- });
-
- done();
- });
- });
-
- it('should get a file from a URL', function(done) {
- var fileUri = 'http://www.google.com/audio.raw';
- var body = 'body';
-
- requestOverride = function(reqOpts, callback) {
- assert.strictEqual(reqOpts.uri, fileUri);
- assert.strictEqual(reqOpts.encoding, null);
-
- var response = {
- body: new Buffer(body)
- };
-
- callback(null, response, response.body);
- };
-
- Speech.findFile_(fileUri, function(err, foundFile) {
- assert.ifError(err);
- assert.deepEqual(foundFile, {
- content: new Buffer(body)
- });
- done();
- });
- });
-
- it('should return an error from reading a URL', function(done) {
- var fileUri = 'http://www.google.com/audio.raw';
- var error = new Error('Error.');
-
- requestOverride = function(options, callback) {
- callback(error);
- };
-
- Speech.findFile_(fileUri, function(err) {
- assert.strictEqual(err, error);
- done();
- });
- });
-
- it('should accept a buffer', function(done) {
- var file = new Buffer('abc');
-
- Speech.findFile_(file, function(err, foundFile) {
- assert.ifError(err);
-
- assert.deepEqual(foundFile, {
- content: file
- });
-
- done();
- });
- });
-
- it('should validate RecognitionAudio object', function(done) {
- var file = {};
-
- Speech.findFile_(file, function(err) {
- assert.strictEqual(
- err.message,
- 'RecognitionAudio requires a "content" or "uri" property.'
- );
-
- done();
- });
- });
-
- it('should accept RecognitionAudio object', function(done) {
- var file = {
- content: 'aGk='
- };
-
- Speech.findFile_(file, function(err, foundFile) {
- assert.ifError(err);
- assert.strictEqual(foundFile, file);
- done();
- });
- });
-
- it('should read from a file path', function(done) {
- tmp.setGracefulCleanup();
-
- tmp.file(function tempFileCreated_(err, tmpFilePath) {
- assert.ifError(err);
-
- var contents = 'abcdef';
-
- function writeFile(callback) {
- fs.writeFile(tmpFilePath, contents, callback);
- }
-
- function convertFile(callback) {
- Speech.findFile_(tmpFilePath, callback);
- }
-
- async.waterfall([writeFile, convertFile], function(err, foundFile) {
- assert.ifError(err);
-
- assert.deepEqual(foundFile, {
- content: new Buffer(contents)
- });
-
- done();
- });
- });
- });
-
- it('should return an error when file cannot be found', function(done) {
- Speech.findFile_('./not-real-file.raw', function(err) {
- assert.strictEqual(err.code, 'ENOENT');
- done();
- });
- });
- });
-
- describe('formatResults_', function() {
- describe('SpeechRecognitionResult', function() {
- var SPEECH_RECOGNITION = {
- original: [
- {
- alternatives: [
- {
- transcript: 'Result 1a',
- confidence: 0.70,
- stability: 0.1
- },
- {
- transcript: 'Result 1b',
- confidence: 0.60,
- stability: 0.1
- }
- ]
- },
- {
- alternatives: [
- {
- transcript: 'Result 2a',
- confidence: 0.90,
- stability: 0.1
- },
- {
- transcript: 'Result 2b',
- confidence: 0.80,
- stability: 0.1
- }
- ]
- }
- ],
-
- expectedDefault: 'Result 1a Result 2a',
-
- expectedVerbose: [
- {
- transcript: 'Result 1a',
- confidence: 70,
- stability: 10,
- alternatives: [
- {
- transcript: 'Result 1b',
- confidence: 60,
- stability: 10,
- }
- ]
- },
- {
- transcript: 'Result 2a',
- confidence: 90,
- stability: 10,
- alternatives: [
- {
- transcript: 'Result 2b',
- confidence: 80,
- stability: 10
- }
- ]
- }
- ]
- };
-
- it('should simplify the results', function() {
- assert.deepEqual(
- Speech.formatResults_(SPEECH_RECOGNITION.original),
- SPEECH_RECOGNITION.expectedDefault
- );
- });
-
- it('should simplify the results in verbose mode', function() {
- assert.deepEqual(
- Speech.formatResults_(SPEECH_RECOGNITION.original, true),
- SPEECH_RECOGNITION.expectedVerbose
- );
- });
- });
- });
-
- describe('createRecognizeStream', function() {
- var CONFIG = {
- languageCode: 'en-US'
- };
- var stream;
- var requestStream;
-
- beforeEach(function() {
- stream = speech.createRecognizeStream(CONFIG);
-
- stream.setPipeline = util.noop;
-
- speech.api.Speech = {
- streamingRecognize: function() {
- requestStream = through.obj();
- return requestStream;
- }
- };
- });
-
- it('should throw if an object is not provided', function() {
- assert.throws(function() {
- speech.createRecognizeStream();
- }, /A recognize request requires a configuration object\./);
- });
-
- it('should throw if a language code is not provided', function() {
- assert.throws(function() {
- speech.createRecognizeStream({});
- }, /languageCode/);
- });
-
- it('should make the correct request once writing started', function(done) {
- speech.api.Speech = {
- streamingRecognize: function() {
- setImmediate(done);
- return through.obj();
- }
- };
-
- stream.emit('writing');
- });
-
- it('should destroy user stream if request stream errors', function(done) {
- var error = new Error('Error.');
-
- stream.on('error', function(err) {
- assert.strictEqual(error, err);
- done();
- });
-
- speech.api.Speech = {
- streamingRecognize: function() {
- var requestStream = through.obj();
-
- setImmediate(function() {
- requestStream.destroy(error);
- });
-
- return requestStream;
- }
- };
-
- stream.emit('writing');
- });
-
- it('should emit the response event on the user stream', function(done) {
- var response = {};
-
- stream.on('response', function(response_) {
- assert.strictEqual(response_, response);
- done();
- });
-
- speech.api.Speech = {
- streamingRecognize: function() {
- var requestStream = through.obj();
-
- setImmediate(function() {
- requestStream.emit('response', response);
- });
-
- return requestStream;
- }
- };
-
- stream.emit('writing');
- });
-
- it('should send the initial write to the request stream', function(done) {
- speech.api.Speech = {
- streamingRecognize: function() {
- var requestStream = through.obj();
-
- requestStream.once('data', function(data) {
- var expected = extend(true, {
- config: {
- languageCode: 'en-US'
- }
- }, CONFIG);
- delete expected.languageCode;
-
- assert.deepEqual(data, {
- streamingConfig: expected
- });
- done();
- });
-
- return requestStream;
- }
- };
-
- stream.emit('writing');
- });
-
- it('should format the incoming data into a duplex stream', function(done) {
- stream.setPipeline = function(streams) {
- var formatStream = streams[0];
- assert.strictEqual(streams[1], requestStream);
-
- var chunk = {};
- formatStream.once('data', function(data) {
- assert.deepEqual(data, {
- audioContent: chunk
- });
- done();
- });
-
- formatStream.end(chunk);
- };
-
- stream.emit('writing');
- });
-
- it('should format the results from the API', function(done) {
- stream.setPipeline = function(streams) {
- var formatStream = streams[2];
-
- var streamingRecognizeResponse = {
- results: []
- };
-
- var formattedResults = [];
-
- Speech.formatResults_ = function(results, verboseMode) {
- assert.strictEqual(results, streamingRecognizeResponse.results);
- assert.strictEqual(verboseMode, false);
- return formattedResults;
- };
-
- formatStream.once('data', function(data) {
- assert.strictEqual(data, streamingRecognizeResponse);
- assert.deepEqual(data.results, formattedResults);
- done();
- });
-
- formatStream.end(streamingRecognizeResponse);
- };
-
- stream.emit('writing');
- });
-
- it('should format results from the API in verbose mode', function(done) {
- var stream = speech.createRecognizeStream({
- languageCode: 'en-US',
- verbose: true
- });
-
- speech.requestWritableStream = function() {
- return through.obj();
- };
-
- stream.setPipeline = function(streams) {
- var formatStream = streams[2];
-
- Speech.formatResults_ = function(results, verboseMode) {
- assert.strictEqual(verboseMode, true);
- done();
- };
-
- formatStream.end({});
- };
-
- stream.emit('writing');
- });
-
- it('should delete verbose option from request object', function(done) {
- var stream = speech.createRecognizeStream({
- languageCode: 'en-US',
- verbose: true
- });
-
- speech.api.Speech = {
- streamingRecognize: function() {
- var stream = through.obj();
-
- stream.on('data', function(data) {
- assert.strictEqual(data.streamingConfig.verbose, undefined);
- done();
- });
-
- return stream;
- }
- };
-
- stream.emit('writing');
- });
-
- it('should allow specifying a timeout', function(done) {
- var timeout = 200;
- var expectedTimeout = 200 * 1000;
-
- speech.api.Speech = {
- streamingRecognize: function(opts) {
- var requestStream = through.obj();
- requestStream._write = util.noop;
-
- assert.strictEqual(opts.timeout, expectedTimeout);
- setImmediate(done);
-
- return requestStream;
- }
- };
-
- var stream = speech.createRecognizeStream({
- languageCode: 'en-US',
- timeout: timeout
- });
-
- stream.emit('writing');
- });
-
- it('should delete timeout option from request object', function(done) {
- speech.api.Speech = {
- streamingRecognize: function() {
- var stream = through.obj();
-
- stream.on('data', function(data) {
- assert.strictEqual(data.streamingConfig.timeout, undefined);
- done();
- });
-
- return stream;
- }
- };
-
- var stream = speech.createRecognizeStream({
- languageCode: 'en-US',
- timeout: 90
- });
-
- stream.emit('writing');
- });
-
- it('should allow specifying a languageCode', function(done) {
- var languageCode = 'uk';
-
- speech.api.Speech = {
- streamingRecognize: function() {
- var stream = through.obj();
-
- stream.on('data', function(data) {
- assert.strictEqual(
- data.streamingConfig.config.languageCode,
- languageCode
- );
- done();
- });
-
- return stream;
- }
- };
-
- var stream = speech.createRecognizeStream({
- config: {
- languageCode: languageCode
- }
- });
-
- stream.emit('writing');
- });
- });
-
- describe('operation', function() {
- var NAME = 'op-name';
-
- it('should throw if a name is not provided', function() {
- assert.throws(function() {
- speech.operation();
- }, /A name must be specified for an operation\./);
- });
-
- it('should return an Operation object', function() {
- var operation = speech.operation(NAME);
- assert(operation instanceof FakeGrpcOperation);
- assert.strictEqual(operation.calledWith_[0], speech);
- assert.strictEqual(operation.calledWith_[1], NAME);
- });
- });
-
- describe('recognize', function() {
- var FILE = {};
- var FOUND_FILE = {};
- var CONFIG = {
- a: 'b',
- languageCode: 'en-US',
- };
- var DETECTED_ENCODING = 'LINEAR16';
-
- beforeEach(function() {
- Speech.detectEncoding_ = function() {
- return DETECTED_ENCODING;
- };
-
- Speech.findFile_ = function(files, callback) {
- callback(null, FOUND_FILE);
- };
-
- speech.api.Speech = {
- recognize: util.noop
- };
- });
-
- it('should throw if an object is not provided', function() {
- assert.throws(function() {
- speech.recognize(FILE, assert.ifError);
- }, /A recognize request requires a configuration object\./);
- });
-
- it('should find the files', function(done) {
- Speech.findFile_ = function(file) {
- assert.strictEqual(file, FILE);
- done();
- };
-
- speech.recognize(FILE, CONFIG, assert.ifError);
- });
-
- it('should make the correct request', function(done) {
- speech.api.Speech = {
- recognize: function(reqOpts) {
- var expectedConfig = extend({
- encoding: DETECTED_ENCODING,
- languageCode: 'en-US'
- }, CONFIG);
-
- assert.deepEqual(reqOpts.config, expectedConfig);
- assert.strictEqual(reqOpts.audio, FOUND_FILE);
-
- done();
- }
- };
-
- speech.recognize(FILE, CONFIG, assert.ifError);
- });
-
- it('should fail if no language code is set', function() {
- assert.throws(function() {
- speech.recognize(FILE, {});
- }, /languageCode/);
- });
-
- it('should allow setting a languageCode', function(done) {
- var languageCode = 'en-GB';
-
- var config = {
- languageCode: languageCode
- };
-
- speech.api.Speech = {
- recognize: function(reqOpts) {
- assert.strictEqual(reqOpts.config.languageCode, languageCode);
- done();
- }
- };
-
- speech.recognize(FILE, config, assert.ifError);
- });
-
- it('should respect the provided encoding', function(done) {
- var config = {
- encoding: 'LINEAR32',
- languageCode: 'en-US'
- };
-
- Speech.detectEncoding_ = function() {
- done(); // Will cause test to fail.
- };
-
- speech.api.Speech = {
- recognize: function(reqOpts) {
- assert.strictEqual(reqOpts.config.encoding, config.encoding);
- done();
- }
- };
-
- speech.recognize(FILE, config, assert.ifError);
- });
-
- it('should guess the encoding if it is not specified', function(done) {
- var expectedEncoding = 'LINEAR16';
-
- Speech.detectEncoding_ = function(file) {
- assert.strictEqual(file, FILE);
- return expectedEncoding;
- };
-
- speech.api.Speech = {
- recognize: function(reqOpts) {
- assert.strictEqual(reqOpts.config.encoding, expectedEncoding);
- done();
- }
- };
-
- speech.recognize(FILE, {languageCode: 'en-US'}, assert.ifError);
- });
-
- it('should return an error from findFile_', function(done) {
- var error = new Error('Error.');
-
- Speech.findFile_ = function(files, callback) {
- callback(error);
- };
-
- speech.recognize(FILE, CONFIG, function(err) {
- assert.strictEqual(err, error);
- done();
- });
- });
-
- describe('error', function() {
- var error = new Error('Error.');
- var apiResponse = {};
-
- beforeEach(function() {
- speech.api.Speech = {
- recognize: function(reqOpts, callback) {
- callback(error, apiResponse);
- }
- };
- });
-
- it('should return the error & API response', function(done) {
- speech.recognize(FILE, CONFIG, function(err, results, apiResponse_) {
- assert.strictEqual(err, error);
- assert.strictEqual(results, null);
- assert.strictEqual(apiResponse_, apiResponse);
- done();
- });
- });
- });
-
- describe('success', function() {
- var apiResponse = {
- results: []
- };
- var formattedResults = [];
-
- beforeEach(function() {
- Speech.formatResults_ = function() {
- return formattedResults;
- };
-
- speech.api.Speech = {
- recognize: function(reqOpts, callback) {
- callback(null, apiResponse);
- }
- };
- });
-
- it('should return the detections & API response', function(done) {
- Speech.formatResults_ = function(results, verboseMode) {
- assert.strictEqual(results, apiResponse.results);
- assert.strictEqual(verboseMode, false);
- return formattedResults;
- };
-
- speech.recognize(FILE, CONFIG, function(err, results, apiResponse_) {
- assert.ifError(err);
- assert.strictEqual(results, formattedResults);
- assert.strictEqual(apiResponse_, apiResponse);
- done();
- });
- });
-
- it('should return the detections in verbose mode', function(done) {
- Speech.formatResults_ = function(results, verboseMode) {
- assert.strictEqual(verboseMode, true);
- done();
- };
-
- var config = extend({}, CONFIG, {
- verbose: true
- });
-
- speech.recognize(FILE, config, assert.ifError);
- });
-
- it('should return the detections in verbose mode', function(done) {
- Speech.formatResults_ = function(results, verboseMode) {
- assert.strictEqual(verboseMode, true);
- done();
- };
-
- var config = extend({}, CONFIG, {
- verbose: true
- });
-
- speech.recognize(FILE, config, assert.ifError);
- });
-
- it('should delete verbose option from request object', function(done) {
- speech.api.Speech = {
- recognize: function(reqOpts) {
- assert.strictEqual(reqOpts.config.verbose, undefined);
- done();
- }
- };
-
- var config = extend({}, CONFIG, {
- verbose: true
- });
-
- speech.recognize(FILE, config, assert.ifError);
- });
- });
- });
-
- describe('startRecognition', function() {
- var FILE = {};
- var FOUND_FILE = {};
- var CONFIG = {
- a: 'b',
- languageCode: 'en-US'
- };
- var DETECTED_ENCODING = 'LINEAR16';
-
- beforeEach(function() {
- Speech.detectEncoding_ = function() {
- return DETECTED_ENCODING;
- };
-
- Speech.findFile_ = function(files, callback) {
- callback(null, FOUND_FILE);
- };
-
- speech.api.Speech = {
- longRunningRecognize: util.noop
- };
- });
-
- it('should find the files', function(done) {
- Speech.findFile_ = function(file) {
- assert.strictEqual(file, FILE);
- done();
- };
-
- speech.startRecognition(FILE, CONFIG, assert.ifError);
- });
-
- it('should make the correct request', function(done) {
- speech.api.Speech = {
- longRunningRecognize: function(reqOpts) {
- var expectedConfig = extend({}, CONFIG, {
- encoding: DETECTED_ENCODING,
- languageCode: 'en-US'
- });
-
- assert.deepEqual(reqOpts.config, expectedConfig);
- assert.strictEqual(reqOpts.audio, FOUND_FILE);
-
- done();
- }
- };
-
- speech.startRecognition(FILE, CONFIG, assert.ifError);
- });
-
- it('should error if no language code is given', function() {
- assert.throws(function() {
- speech.startRecognition(FILE, {});
- }, /languageCode/);
- });
-
- it('should respect the provided language code', function(done) {
- var languageCode = 'en-GB';
-
- var config = {
- languageCode: languageCode
- };
-
- speech.api.Speech = {
- longRunningRecognize: function(reqOpts) {
- assert.strictEqual(reqOpts.config.languageCode, languageCode);
- done();
- }
- };
-
- speech.startRecognition(FILE, config, assert.ifError);
- });
-
- it('should respect the provided encoding', function(done) {
- var config = {
- encoding: 'LINEAR32',
- languageCode: 'en-US'
- };
-
- Speech.detectEncoding_ = function() {
- done(); // Will cause test to fail.
- };
-
- speech.api.Speech = {
- longRunningRecognize: function(reqOpts) {
- assert.strictEqual(reqOpts.config.encoding, config.encoding);
- done();
- }
- };
-
- speech.startRecognition(FILE, config, assert.ifError);
- });
-
- it('should guess the encoding if it is not specified', function(done) {
- var expectedEncoding = 'LINEAR16';
-
- Speech.detectEncoding_ = function(file) {
- assert.strictEqual(file, FILE);
- return expectedEncoding;
- };
-
- speech.api.Speech = {
- longRunningRecognize: function(reqOpts) {
- assert.strictEqual(reqOpts.config.encoding, expectedEncoding);
- done();
- }
- };
-
- speech.startRecognition(FILE, {languageCode: 'en-US'}, assert.ifError);
- });
-
- it('should return an error from findFile_', function(done) {
- var error = new Error('Error.');
-
- Speech.findFile_ = function(files, callback) {
- callback(error);
- };
-
- speech.startRecognition(FILE, CONFIG, function(err) {
- assert.strictEqual(err, error);
- done();
- });
- });
-
- describe('error', function() {
- var error = new Error('Error.');
- var apiResponse = {};
-
- it('should return the error & API response', function(done) {
- speech.api.Speech = {
- longRunningRecognize: function(reqOpts, callback) {
- callback(error, null, apiResponse);
- }
- };
-
- speech.startRecognition(FILE, CONFIG, function(err, op, apiResponse_) {
- assert.strictEqual(err, error);
- assert.strictEqual(op, null);
- assert.strictEqual(apiResponse_, apiResponse);
- done();
- });
- });
- });
-
- describe('success', function() {
- var apiResponse = {
- name: 'operation-name',
- response: {
- value: 'value string to be decoded'
- }
- };
-
- it('should format the results', function(done) {
- speech.api.Speech = {
- longRunningRecognize: function(reqOpts, callback) {
- var operation = through.obj();
- callback(null, operation, apiResponse);
- }
- };
-
- var result = {
- results: []
- };
-
- var formattedResults = [];
- Speech.formatResults_ = function(results, verboseMode) {
- assert.strictEqual(results, result.results);
- assert.strictEqual(verboseMode, false);
- return formattedResults;
- };
-
- speech.startRecognition(FILE, CONFIG, function(err, operation) {
- assert.ifError(err);
-
- operation.emit('complete', result, null, null, function(err, resp) {
- assert.ifError(err);
- assert.strictEqual(resp, formattedResults);
- done();
- });
- });
- });
-
- it('should format results in verbose mode', function(done) {
- speech.api.Speech = {
- longRunningRecognize: function(reqOpts, callback) {
- var operation = through.obj();
- callback(null, operation, apiResponse);
- }
- };
-
- Speech.formatResults_ = function(results, verboseMode) {
- assert.strictEqual(verboseMode, true);
- done();
- };
-
- var config = extend({}, CONFIG, {
- verbose: true
- });
-
- speech.startRecognition(FILE, config, function(err, operation) {
- assert.ifError(err);
-
- operation.emit('complete', {}, null, null, assert.ifError);
- });
- });
-
- it('should delete verbose option from request object', function(done) {
- speech.api.Speech = {
- longRunningRecognize: function(reqOpts) {
- assert.strictEqual(reqOpts.config.verbose, undefined);
- done();
- }
- };
-
- var config = extend({}, CONFIG, {
- verbose: true
- });
-
- speech.startRecognition(FILE, config, assert.ifError);
- });
- });
- });
-});
diff --git a/packages/vision/package.json b/packages/vision/package.json
index cf04111ca7b..a51fc3c5cc9 100644
--- a/packages/vision/package.json
+++ b/packages/vision/package.json
@@ -66,17 +66,22 @@
"devDependencies": {
"@google-cloud/storage": "*",
"deep-strict-equal": "^0.2.0",
+ "intelli-espower-loader": "^1.0.1",
"mocha": "^3.0.1",
"multiline": "^1.0.2",
"node-uuid": "^1.4.7",
"normalize-newline": "^2.0.0",
+ "nyc": "^10.3.0",
+ "power-assert": "^1.4.2",
"proxyquire": "^1.7.10",
+ "sinon": "^2.2.0",
"tmp": "^0.0.31"
},
"scripts": {
+ "cover": "nyc --reporter=lcov --reporter=html mocha --no-timeouts --require intelli-espower-loader test/*.js && nyc report",
"publish-module": "node ../../scripts/publish.js vision",
- "test": "mocha test/*.js",
- "system-test": "mocha system-test/*.js --no-timeouts --bail"
+ "test": "mocha --require intelli-espower-loader test/*.js",
+ "system-test": "mocha system-test/*.js --require intelli-espower-loader --no-timeouts --bail"
},
"license": "Apache-2.0",
"engines": {
diff --git a/packages/vision/src/helpers.js b/packages/vision/src/helpers.js
new file mode 100644
index 00000000000..414e3c1fb14
--- /dev/null
+++ b/packages/vision/src/helpers.js
@@ -0,0 +1,200 @@
+/*!
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*!
+ * @module vision/helpers
+ */
+
+'use strict';
+
+var fs = require('fs');
+var is = require('is');
+
+var promisify = require('@google-cloud/common').util.promisify;
+var gax = require('google-gax');
+var protoFiles = require('google-proto-files');
+
+
+/*!
+ * Find a given image and fire a callback with the appropriate image structure.
+ *
+ * @param {Object} image - An object representing what is known about the
+ * image.
+ * @param {Function} callback - The callback to run.
+ */
+var coerceImage = (image, callback) => {
+ // If this is a buffer, read it and send the object
+ // that the Vision API expects.
+ if (Buffer.isBuffer(image)) {
+ callback(null, {
+ content: image.toString('base64')
+ });
+ return;
+ }
+
+ // The image source points at a file on disk; read it into base64 content.
+ if (image.source && image.source.filename) {
+ fs.readFile(image.source.filename, {encoding: 'base64'}, (err, blob) => {
+ if (err) {
+ callback(err);
+ return;
+ }
+ // `blob` is already a base64 string because of the encoding option above.
+ callback(null, {content: blob});
+ });
+ return;
+ }
+
+ // No other options were relevant; return the image with no modification.
+ callback(null, image);
+ return;
+};
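+
+ // For illustration only (not part of the module's API surface): the input
+ // shapes `coerceImage` handles, assuming a local file named 'image.jpg'
+ // exists and `cb` is a Node-style callback.
+ //
+ // coerceImage(fs.readFileSync('image.jpg'), cb);
+ // // -> cb(null, {content: '<base64 string>'})
+ //
+ // coerceImage({source: {filename: 'image.jpg'}}, cb);
+ // // -> cb(null, {content: '<base64 string>'})
+ //
+ // coerceImage({source: {imageUri: 'gs://bucket/image.jpg'}}, cb);
+ // // -> cb(null, {source: {imageUri: 'gs://bucket/image.jpg'}}) (unchanged)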
+
+
+/*!
+ * Return a method that calls annotateImage asking for a single feature.
+ *
+ * @param {Number} featureValue - The feature being requested. This is taken
+ * from the Feature.Type enum, and will be an integer.
+ *
+ * @return {Function} - The function that, when called, will call annotateImage
+ * asking for the single feature annotation.
+ */
+var _createSingleFeatureMethod = featureValue => {
+ return function(image, options) {
+ return this.annotateImage({
+ image: image,
+ features: [{type: featureValue}],
+ }, options);
+ };
+};
+
+
+/*!
+ * Return a dictionary-like object with helpers to augment the Vision
+ * GAPIC.
+ *
+ * @param {string} apiVersion - The API version (e.g. "v1")
+ *
+ * @return {Object} - An object with keys and functions which are placed
+ * onto the pure GAPIC.
+ */
+module.exports = apiVersion => {
+ var methods = {};
+
+ /**
+ * Annotate a single image with the requested features.
+ *
+ * @param {Object=} request
+ * A representation of the request being sent to the Vision API.
+ * @param {Object=} request.image
+ * A dictionary-like object representing the image. This should have a
+ * single key: either `source` or `content`.
+ *
+ * If the key is `source`, the value should be another object containing
+ * `image_uri` or `filename` as a key and a string as a value.
+ *
+ * If the key is `content`, the value should be a Buffer.
+ * @param {Array} request.features
+ * An array of the specific annotation features being requested.
+ * @param {Object=} options
+ * Optional parameters. You can override the default settings for this
+ * call, e.g. timeout, retries, pagination, etc. See [gax.CallOptions]
+ * {@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions}
+ * for the details.
+ * @param {function(?Error, ?Object)=} callback
+ * The function which will be called with the result of the API call.
+ *
+ * The second parameter to the callback is an object representing
+ * [AnnotateImageResponse]{@link AnnotateImageResponse}.
+ * @return {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing
+ * [AnnotateImageResponse]{@link AnnotateImageResponse}.
+ * The promise has a method named "cancel" which cancels the ongoing
+ * API call.
+ *
+ * @example
+ * var request = {
+ * image: {source: {image_uri: 'gs://path/to/image.jpg'}},
+ * features: [],
+ * };
+ * vision.annotateImage(request).then(response => {
+ * // doThingsWith(response);
+ * }).catch(err => {
+ * console.error(err);
+ * });
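+ *
+ * //-
+ * // A callback may be passed instead of using the promise (a sketch that
+ * // mirrors the promise form above).
+ * //-
+ * vision.annotateImage(request, function(err, response) {
+ * if (err) {
+ * console.error(err);
+ * return;
+ * }
+ * // doThingsWith(response);
+ * });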
+ */
+ methods.annotateImage = promisify(function(request, options, callback) {
+ // If a callback was provided and options were skipped, normalize
+ // the argument names.
+ if (is.undefined(callback) && is.function(options)) {
+ callback = options;
+ options = undefined;
+ }
+
+ // If there is no image, throw an exception.
+ if (is.undefined(request.image)) {
+ throw new Error('Attempted to call `annotateImage` with no image.');
+ }
+
+ // If we got a filename for the image, open the file and transform
+ // it to content.
+ return coerceImage(request.image, (err, image) => {
+ if (err) {
+ return callback(err);
+ }
+ request.image = image;
+
+ // Call the GAPIC batch annotation function.
+ return this.batchAnnotateImages([request], options, (err, r) => {
+ // If there is an error, handle it.
+ if (err) {
+ return callback(err);
+ }
+
+ // We are guaranteed to only have one response element, since we
+ // only sent one image.
+ var response = r.responses[0];
+
+ // Fire the callback if applicable.
+ return callback(undefined, response);
+ });
+ });
+ });
+
+ // Get a list of features available on the API, and iterate over them
+ // and create single-feature methods for each.
+ var features = gax.grpc().load([{
+ root: protoFiles('..'),
+ file: `google/cloud/vision/${apiVersion}/image_annotator.proto`,
+ }]).google.cloud.vision[apiVersion].Feature.Type;
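+
+ // `features` is the Feature.Type enum as a plain object mapping feature
+ // names to numeric values (e.g. FACE_DETECTION to its enum number), which
+ // is what the loop below iterates over.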
+ for (let feature of Object.keys(features)) {
+ // Determine the method name that should be used for the helper method,
+ // based on the Feature.Type enum in the proto.
+ //
+ // The method name should be camelCased (e.g. "FACE_DETECTION" becomes
+ // "faceDetection").
+ let featureValue = features[feature];
+ // (The `g` flag ensures every underscore is converted, so e.g.
+ // "DOCUMENT_TEXT_DETECTION" becomes "documentTextDetection".)
+ let methodName = feature.toLowerCase().replace(/_([\w])/g, match => {
+ return match[1].toUpperCase();
+ });
+
+ // Assign the single-feature method to the `methods` object.
+ methods[methodName] = promisify(_createSingleFeatureMethod(featureValue));
+ }
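+
+ // Illustrative only: once these helpers are mixed into a client instance,
+ // callers get per-feature shortcuts, e.g. (assuming a local 'image.jpg'):
+ //
+ // client.faceDetection({source: {filename: 'image.jpg'}})
+ // .then(results => { /* doThingsWith(results); */ });
+ //
+ // Each shortcut simply delegates to `annotateImage` with the matching
+ // Feature.Type value.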
+
+ // Done; return the dictionary of methods.
+ return methods;
+};
diff --git a/packages/vision/src/index.js b/packages/vision/src/index.js
index d2b55077e91..a29422fbb9e 100644
--- a/packages/vision/src/index.js
+++ b/packages/vision/src/index.js
@@ -16,2101 +16,54 @@
/*!
* @module vision
+ * @name Vision
*/
'use strict';
-var arrify = require('arrify');
-var async = require('async');
-var common = require('@google-cloud/common');
-var commonGrpc = require('@google-cloud/common-grpc');
-var extend = require('extend');
-var format = require('string-format-obj');
-var fs = require('fs');
var is = require('is');
-var prop = require('propprop');
-var propAssign = require('prop-assign');
-var rgbHex = require('rgb-hex');
-var v1 = require('./v1');
+var gapic = {
+ v1: require('./v1'),
+};
+var helpers = require('./helpers');
+
+const VERSION = require('../package.json').version;
-var VERY_UNLIKELY = 0;
-var UNLIKELY = 1;
-var POSSIBLE = 2;
-var LIKELY = 3;
-var VERY_LIKELY = 4;
/**
- * The [Cloud Vision API](https://cloud.google.com/vision/docs) allows easy
- * integration of vision detection features, including image labeling, face and
- * landmark detection, optical character recognition (OCR), and tagging of
- * explicit content.
+ * Create an imageAnnotatorClient with additional helpers for common
+ * tasks.
*
* @constructor
* @alias module:vision
- *
- * @resource [Getting Started]{@link https://cloud.google.com/vision/docs/getting-started}
- * @resource [Image Best Practices]{@link https://cloud.google.com/vision/docs/image-best-practices}
- *
- * @param {object} options - [Configuration object](#/docs).
- */
-function Vision(options) {
- if (!(this instanceof Vision)) {
- options = common.util.normalizeArguments(this, options);
- return new Vision(options);
- }
-
- options = extend({}, options, {
- libName: 'gccl',
- libVersion: require('../package.json').version
- });
-
- this.api = {
- Vision: v1(options).imageAnnotatorClient(options)
- };
+ * @mixes module:vision/helpers
+ *
+ * @param {Object=} opts - The optional parameters.
+ * @param {String=} opts.servicePath
+ * The domain name of the API remote host.
+ * @param {number=} opts.port
+ * The port on which to connect to the remote host.
+ * @param {grpc.ClientCredentials=} opts.sslCreds
+ * A ClientCredentials for use with an SSL-enabled channel.
+ * @param {Object=} opts.clientConfig
+ * The customized config to build the call settings. See
+ * {@link gax.constructSettings} for the format.
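+ *
+ * @example
+ * // A minimal usage sketch (illustrative; assumes application default
+ * // credentials are available in the environment).
+ * var client = visionV1();
+ * client.labelDetection({source: {filename: 'image.jpg'}})
+ * .then(function(results) {
+ * // doThingsWith(results);
+ * });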
+ */
+function visionV1(opts) {
+ // Attach the library name and version, which are sent with requests as headers.
+ opts = opts || {};
+ opts.libName = 'gccl';
+ opts.libVersion = VERSION;
+
+ // Create the image annotator client with the provided options.
+ var client = gapic.v1(opts).imageAnnotatorClient(opts);
+ if (is.undefined(client.annotateImage)) {
+ Object.assign(client.constructor.prototype, helpers('v1'));
+ }
+ return client;
}
-Vision.likelihood = {
- VERY_UNLIKELY: VERY_UNLIKELY,
- UNLIKELY: UNLIKELY,
- POSSIBLE: POSSIBLE,
- LIKELY: LIKELY,
- VERY_LIKELY: VERY_LIKELY
-};
-
-/**
- * Run image detection and annotation for an image or batch of images.
- *
- * This is an advanced API method that requires raw
- * [`AnnotateImageRequest`](https://cloud.google.com/vision/reference/rest/v1/images/annotate#AnnotateImageRequest)
- * objects to be provided. If that doesn't sound like what you're looking for,
- * you'll probably appreciate {module:vision#detect}.
- *
- * @resource [images.annotate API Reference]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate}
- *
- * @param {object|object[]} requests - An `AnnotateImageRequest` or array of
- * `AnnotateImageRequest`s. See an
- * [`AnnotateImageRequest`](https://cloud.google.com/vision/reference/rest/v1/images/annotate#AnnotateImageRequest).
- * @param {function} callback - The callback function.
- * @param {?error} callback.err - An error returned while making this request.
- * @param {object} callback.annotations - See an
- * [`AnnotateImageResponse`](https://cloud.google.com/vision/reference/rest/v1/images/annotate#AnnotateImageResponse).
- * @param {object} callback.apiResponse - Raw API response.
- *
- * @example
- * var annotateImageReq = {
- * // See the link in the parameters for `AnnotateImageRequest`.
- * };
- *
- * vision.annotate(annotateImageReq, function(err, annotations, apiResponse) {
- * // annotations = apiResponse.responses
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.annotate(annotateImageReq).then(function(data) {
- * var annotations = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.annotate = function(requests, callback) {
- this.api.Vision.batchAnnotateImages({
- requests: arrify(requests)
- }, function(err, resp) {
- if (err) {
- callback(err, null, resp);
- return;
- }
-
- callback(null, resp.responses, resp);
- });
-};
-
-// jscs:disable maximumLineLength
-/**
- * Detect properties from an image (or images) of one or more types.
- *
- * API simplifications
- *
- * The raw API response will return some values in a range from `VERY_UNLIKELY`
- * to `VERY_LIKELY`. For simplification, any value less than `LIKELY` is
- * converted to `false`.
- *
- * - **False**
- * - `VERY_UNLIKELY`
- * - `UNLIKELY`
- * - `POSSIBLE`
- * - **True**
- * - `LIKELY`
- * - `VERY_LIKELY`
- *
- * The API will also return many values represented in a `[0,1]` range. We
- * convert these to a `[0,100]` value. E.g, `0.4` is represented as `40`.
- *
- * For the response in the original format, review the `apiResponse` argument
- * your callback receives.
- *
- * @param {string|string[]|buffer|buffer[]|module:storage/file|module:storage/file[]} images - The
- * source image(s) to run the detection on. It can be either a local image
- * path, a remote image URL, a Buffer, or a @google-cloud/storage File
- * object.
- * @param {string[]|object=} options - An array of types or a configuration
- * object.
- * @param {object=} options.imageContext - See an
- * [`ImageContext`](https://cloud.google.com/vision/reference/rest/v1/images/annotate#ImageContext)
- * resource.
- * @param {number} options.maxResults - The maximum number of results, per type,
- * to return in the response.
- * @param {string[]} options.types - An array of feature types to detect from
- * the provided images. Acceptable values: `crops`, `document`, `faces`,
- * `landmarks`, `labels`, `logos`, `properties`, `safeSearch`, `similar`,
- * `text`.
- * @param {boolean=} options.verbose - Use verbose mode, which returns a less-
- * simplistic representation of the annotation (default: `false`).
- * @param {function} callback - The callback function.
- * @param {?error} callback.err - An error returned while making this request.
- * @param {object[]} callback.err.errors - If present, these represent partial
- * failures. It's possible for part of your request to be completed
- * successfully, while a single feature request was not successful.
- * @param {object|object[]} callback.detections - If a single detection type was
- * asked for, it will be returned in its raw form; either an object or array
- * of objects. If multiple detection types were requested, you will receive
- * an object with keys for each detection type (listed above in
- * `config.types`). Additionally, if multiple images were provided, you will
- * receive an array of detection objects, each representing an image. See
- * the examples below for more information.
- * @param {object} callback.apiResponse - Raw API response.
- *
- * @example
- * var types = [
- * 'face',
- * 'label'
- * ];
- *
- * vision.detect('image.jpg', types, function(err, detections, apiResponse) {
- * // detections = {
- * // faces: [...],
- * // labels: [...]
- * // }
- * });
- *
- * //-
- * // Run feature detection over a remote image.
- * //-
- * var img = 'https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png';
- *
- * vision.detect(img, types, function(err, detection, apiResponse) {});
- *
- * //-
- * // Run feature detection over a Buffer.
- * //-
- * var level = require('level');
- * var db = level('./users-database');
- *
- * db.get('user-image', { encoding: 'binary' }, function(err, image) {
- * if (err) {
- * // Error handling omitted.
- * }
- *
- * vision.detect(image, types, function(err, detection, apiResponse) {});
- * });
- *
- * //-
- * // If you have a base64 string, provide it in a Buffer.
- * //-
- * var myBase64ImageRepresentation = '...';
- *
- * var image = new Buffer(myBase64ImageRepresentation, 'base64');
- *
- * vision.detect(image, types, function(err, detection, apiResponse) {});
- *
- * //-
- * // Supply multiple images for feature detection.
- * //-
- * var images = [
- * 'image.jpg',
- * 'image-two.jpg'
- * ];
- *
- * var types = [
- * 'face',
- * 'label'
- * ];
- *
- * vision.detect(images, types, function(err, detections, apiResponse) {
- * // detections = [
- * // // Detections for image.jpg:
- * // {
- * // faces: [...],
- * // labels: [...]
- * // },
- * //
- * // // Detections for image-two.jpg:
- * // {
- * // faces: [...],
- * // labels: [...]
- * // }
- * // ]
- * });
- *
- * //-
- * // It's possible for part of your request to be completed successfully, while
- * // a single feature request was not successful.
- * //-
- * vision.detect('malformed-image.jpg', types, function(err, detections) {
- * if (err) {
- * // An API error or partial failure occurred.
- *
- * if (err.name === 'PartialFailureError') {
- * // err.errors = [
- * // {
- * // image: 'malformed-image.jpg',
- * // errors: [
- * // {
- * // code: 400,
- * // message: 'Bad image data',
- * // type: 'faces'
- * // },
- * // {
- * // code: 400,
- * // message: 'Bad image data',
- * // type: 'labels'
- * // }
- * // ]
- * // }
- * // ]
- * }
- * }
- *
- * // `detections` will still be populated with all of the results that could
- * // be annotated.
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.detect('image.jpg', types).then(function(data) {
- * var detections = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.detect = function(images, options, callback) {
- var self = this;
- var isSingleImage = !is.array(images) || images.length === 1;
-
- if (!is.object(options)) {
- options = {
- types: options
- };
- }
-
- var types = arrify(options.types);
-
- var typeShortNameToFullName = {
- crop: 'CROP_HINTS',
- crops: 'CROP_HINTS',
-
- doc: 'DOCUMENT_TEXT_DETECTION',
- document: 'DOCUMENT_TEXT_DETECTION',
-
- face: 'FACE_DETECTION',
- faces: 'FACE_DETECTION',
-
- label: 'LABEL_DETECTION',
- labels: 'LABEL_DETECTION',
-
- landmark: 'LANDMARK_DETECTION',
- landmarks: 'LANDMARK_DETECTION',
-
- logo: 'LOGO_DETECTION',
- logos: 'LOGO_DETECTION',
-
- properties: 'IMAGE_PROPERTIES',
-
- safeSearch: 'SAFE_SEARCH_DETECTION',
-
- similar: 'WEB_DETECTION',
-
- text: 'TEXT_DETECTION'
- };
-
- var typeShortNameToRespName = {
- crop: 'cropHintsAnnotation',
- crops: 'cropHintsAnnotation',
-
- doc: 'fullTextAnnotation',
- document: 'fullTextAnnotation',
-
- face: 'faceAnnotations',
- faces: 'faceAnnotations',
-
- label: 'labelAnnotations',
- labels: 'labelAnnotations',
-
- landmark: 'landmarkAnnotations',
- landmarks: 'landmarkAnnotations',
-
- logo: 'logoAnnotations',
- logos: 'logoAnnotations',
-
- properties: 'imagePropertiesAnnotation',
-
- safeSearch: 'safeSearchAnnotation',
-
- similar: 'webDetection',
-
- text: 'textAnnotations'
- };
-
- var typeRespNameToShortName = {
- cropHintsAnnotation: 'crops',
- faceAnnotations: 'faces',
- fullTextAnnotation: 'document',
- imagePropertiesAnnotation: 'properties',
- labelAnnotations: 'labels',
- landmarkAnnotations: 'landmarks',
- logoAnnotations: 'logos',
- safeSearchAnnotation: 'safeSearch',
- textAnnotations: 'text',
- webDetection: 'similar'
- };
-
- Vision.findImages_(images, function(err, foundImages) {
- if (err) {
- callback(err);
- return;
- }
-
- var config = [];
-
- foundImages.forEach(function(image) {
- types.forEach(function(type) {
- var typeName = typeShortNameToFullName[type];
-
- if (!typeName) {
- throw new Error('Requested detection feature not found: ' + type);
- }
-
- var cfg = {
- image: image,
- features: [
- {
- type: typeName
- }
- ]
- };
-
- if (is.object(options.imageContext)) {
- cfg.imageContext = options.imageContext;
- }
-
- if (is.number(options.maxResults)) {
- cfg.features.map(propAssign('maxResults', options.maxResults));
- }
-
- config.push(cfg);
- });
- });
-
- self.annotate(config, function(err, annotations, resp) {
- if (err) {
- callback(err, null, resp);
- return;
- }
-
- var originalResp = extend(true, {}, resp);
- var partialFailureErrors = [];
-
- var detections = foundImages
- .map(groupDetectionsByImage)
- .map(removeExtraneousAnnotationObjects)
- .map(assignTypeToEmptyAnnotations)
- .map(removeDetectionsWithErrors)
- .map(flattenAnnotations)
- .map(decorateAnnotations);
-
- if (partialFailureErrors.length > 0) {
- err = new common.util.PartialFailureError({
- errors: partialFailureErrors,
- response: originalResp
- });
- }
-
- if (isSingleImage && detections.length > 0) {
- // If only a single image was given, expose it from the array.
- detections = detections[0];
- }
-
- callback(err, detections, originalResp);
-
- function groupDetectionsByImage() {
- // detections = [
- // // Image one:
- // [
- // {
- // faceAnnotations: {},
- // labelAnnotations: {},
- // ...
- // }
- // ],
- //
- // // Image two:
- // [
- // {
- // faceAnnotations: {},
- // labelAnnotations: {},
- // ...
- // }
- // ]
- // ]
- return annotations.splice(0, types.length);
- }
-
- function removeExtraneousAnnotationObjects(annotations) {
- // The API response includes empty annotations for features that weren't
- // requested.
- //
- // Before:
- // [
- // {
- // faceAnnotations: {},
- // labelAnnotations: {}
- // }
- // ]
- //
- // After:
- // [
- // {
- // faceAnnotations: {}
- // }
- // ]
- return annotations.map(function(annotation, index) {
- var requestedAnnotationType = typeShortNameToRespName[types[index]];
-
- for (var prop in annotation) {
- if (prop !== requestedAnnotationType && prop !== 'error') {
- /* istanbul ignore next */
- delete annotation[prop];
- }
- }
-
- return annotation;
- });
- }
-
- function assignTypeToEmptyAnnotations(annotations) {
- // Before:
- // [
- // {}, // What annotation type was attempted?
- // { labelAnnotations: {...} }
- // ]
- //
- // After:
- // [
- // { faceAnnotations: [] },
- // { labelAnnotations: {...} }
- // ]
- return annotations.map(function(annotation, index) {
- var detectionType = types[index];
- var typeName = typeShortNameToRespName[detectionType];
-
- if (is.empty(annotation) || annotation.error) {
- var isPlural = typeName.charAt(typeName.length - 1) === 's';
- annotation[typeName] = isPlural ? [] : {};
- }
-
- return annotation;
- });
- }
-
- function removeDetectionsWithErrors(annotations, index) {
- // Before:
- // [
- // {
- // faceAnnotations: []
- // },
- // {
- // error: {...},
- // imagePropertiesAnnotation: {}
- // }
- // ]
-
- // After:
- // [
- // {
- // faceAnnotations: []
- // },
- // undefined
- // ]
- var errors = [];
-
- annotations.forEach(function(annotation, index) {
- if (!is.empty(annotation.error)) {
- var userInputType = types[index];
- var respNameType = typeShortNameToRespName[userInputType];
- annotation.error.type = typeRespNameToShortName[respNameType];
- errors.push(Vision.formatError_(annotation.error));
- }
- });
-
- if (errors.length > 0) {
- partialFailureErrors.push({
- image: isSingleImage ? images : images[index],
- errors: errors
- });
-
- return;
- }
-
- return annotations;
- }
-
- function flattenAnnotations(annotations) {
- return extend.apply(null, annotations);
- }
-
- function formatAnnotationBuilder(type) {
- return function(annotation) {
- if (is.empty(annotation)) {
- return annotation;
- }
-
- var formatMethodMap = {
- cropHintsAnnotation: Vision.formatCropHintsAnnotation_,
- error: Vision.formatError_,
- faceAnnotations: Vision.formatFaceAnnotation_,
- fullTextAnnotation: Vision.formatFullTextAnnotation_,
- imagePropertiesAnnotation: Vision.formatImagePropertiesAnnotation_,
- labelAnnotations: Vision.formatEntityAnnotation_,
- landmarkAnnotations: Vision.formatEntityAnnotation_,
- logoAnnotations: Vision.formatEntityAnnotation_,
- safeSearchAnnotation: Vision.formatSafeSearchAnnotation_,
- textAnnotations: Vision.formatEntityAnnotation_,
- webDetection: Vision.formatWebDetection_
- };
-
- var formatMethod = formatMethodMap[type] || function(annotation) {
- return annotation;
- };
-
- return formatMethod(annotation, options);
- };
- }
-
- function decorateAnnotations(annotations) {
- for (var annotationType in annotations) {
- if (annotations.hasOwnProperty(annotationType)) {
- var annotationGroup = arrify(annotations[annotationType]);
-
- var formattedAnnotationGroup = annotationGroup
- .map(formatAnnotationBuilder(annotationType));
-
- // An annotation can be singular, e.g. SafeSearch. It is either
- // violent or not. Unlike face detection, where there can be
- // multiple results.
- //
- // Be sure the original type (object or array) is preserved and
- // not wrapped in an array if it wasn't originally.
- if (!is.array(annotations[annotationType])) {
- formattedAnnotationGroup = formattedAnnotationGroup[0];
- }
-
- delete annotations[annotationType];
- var typeShortName = typeRespNameToShortName[annotationType];
- annotations[typeShortName] = formattedAnnotationGroup;
- }
- }
-
- if (types.length === 1) {
- // Only a single detection type was asked for, so no need to box in
- // the results. Make them accessible without using a key.
- var key = typeRespNameToShortName[typeShortNameToRespName[types[0]]];
- annotations = annotations[key];
- }
-
- return annotations;
- }
- });
- });
-};
-
-// jscs:enable maximumLineLength
-
-/**
- * Detect the crop hints within an image.
- *
- * Parameters
- *
- * See {module:vision#detect}.
- *
- * @resource [CropHintsAnnotation JSON respresentation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#CropHintsAnnotation}
- *
- * @example
- * vision.detectCrops('image.jpg', function(err, crops, apiResponse) {
- * // crops = [
- * // [
- * // {
- * // x: 1
- * // },
- * // {
- * // x: 295
- * // },
- * // {
- * // x: 295,
- * // y: 301
- * // },
- * // {
- * // x: 1,
- * // y: 301
- * // }
- * // ],
- * // // ...
- * // ]
- * });
- *
- * //-
- * // Activate `verbose` mode for a more detailed response.
- * //-
- * var options = {
- * verbose: true
- * };
- *
- * vision.detectCrops('image.jpg', options, function(err, crops, apiResponse) {
- * // crops = [
- * // {
- * // bounds: [
- * // {
- * // x: 1
- * // },
- * // {
- * // x: 295
- * // },
- * // {
- * // x: 295,
- * // y: 301
- * // },
- * // {
- * // x: 1,
- * // y: 301
- * // }
- * // ],
- * // confidence: 0.799999995
- * // },
- * // // ...
- * // ]
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.detectCrops('image.jpg').then(function(data) {
- * var crops = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.detectCrops = function(images, options, callback) {
- if (is.fn(options)) {
- callback = options;
- options = {};
- }
-
- options = extend({}, options, {
- types: ['crops']
- });
-
- this.detect(images, options, callback);
-};
-
-/**
- * Run face detection against an image.
- *
- * Parameters
- *
- * See {module:vision#detect}.
- *
- * @resource [FaceAnnotation JSON respresentation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#FaceAnnotation}
- *
- * @example
- * vision.detectFaces('image.jpg', function(err, faces, apiResponse) {
- * // faces = [
- * // {
- * // angles: {
- * // pan: -8.1090336,
- * // roll: -5.0002542,
- * // tilt: 18.012161
- * // },
- * // bounds: {
- * // head: [
- * // {
- * // x: 1
- * // },
- * // {
- * // x: 295
- * // },
- * // {
- * // x: 295,
- * // y: 301
- * // },
- * // {
- * // x: 1,
- * // y: 301
- * // }
- * // ],
- * // face: [
- * // {
- * // x: 28,
- * // y: 40
- * // },
- * // {
- * // x: 250,
- * // y: 40
- * // },
- * // {
- * // x: 250,
- * // y: 262
- * // },
- * // {
- * // x: 28,
- * // y: 262
- * // }
- * // ]
- * // },
- * // features: {
- * // confidence: 34.489909,
- * // chin: {
- * // center: {
- * // x: 143.34183,
- * // y: 262.22998,
- * // z: -57.388493
- * // },
- * // left: {
- * // x: 63.102425,
- * // y: 248.99081,
- * // z: 44.207638
- * // },
- * // right: {
- * // x: 241.72728,
- * // y: 225.53488,
- * // z: 19.758242
- * // }
- * // },
- * // ears: {
- * // left: {
- * // x: 54.872219,
- * // y: 207.23712,
- * // z: 97.030685
- * // },
- * // right: {
- * // x: 252.67567,
- * // y: 180.43124,
- * // z: 70.15992
- * // }
- * // },
- * // eyebrows: {
- * // left: {
- * // left: {
- * // x: 58.790176,
- * // y: 113.28249,
- * // z: 17.89735
- * // },
- * // right: {
- * // x: 106.14151,
- * // y: 98.593758,
- * // z: -13.116687
- * // },
- * // top: {
- * // x: 80.248711,
- * // y: 94.04303,
- * // z: 0.21131183
- * // }
- * // },
- * // right: {
- * // left: {
- * // x: 148.61565,
- * // y: 92.294594,
- * // z: -18.804882
- * // },
- * // right: {
- * // x: 204.40808,
- * // y: 94.300117,
- * // z: -2.0009689
- * // },
- * // top: {
- * // x: 174.70135,
- * // y: 81.580917,
- * // z: -12.702137
- * // }
- * // }
- * // },
- * // eyes: {
- * // left: {
- * // bottom: {
- * // x: 84.883934,
- * // y: 134.59479,
- * // z: -2.8677137
- * // },
- * // center: {
- * // x: 83.707092,
- * // y: 128.34,
- * // z: -0.00013388535
- * // },
- * // left: {
- * // x: 72.213913,
- * // y: 132.04138,
- * // z: 9.6985674
- * // },
- * // pupil: {
- * // x: 86.531624,
- * // y: 126.49807,
- * // z: -2.2496929
- * // },
- * // right: {
- * // x: 105.28892,
- * // y: 125.57655,
- * // z: -2.51554
- * // },
- * // top: {
- * // x: 86.706947,
- * // y: 119.47144,
- * // z: -4.1606765
- * // }
- * // },
- * // right: {
- * // bottom: {
- * // x: 179.30353,
- * // y: 121.03307,
- * // z: -14.843414
- * // },
- * // center: {
- * // x: 181.17694,
- * // y: 115.16437,
- * // z: -12.82961
- * // },
- * // left: {
- * // x: 158.2863,
- * // y: 118.491,
- * // z: -9.723031
- * // },
- * // pupil: {
- * // x: 175.99976,
- * // y: 114.64407,
- * // z: -14.53744
- * // },
- * // right: {
- * // x: 194.59413,
- * // y: 115.91954,
- * // z: -6.952745
- * // },
- * // top: {
- * // x: 173.99446,
- * // y: 107.94287,
- * // z: -16.050705
- * // }
- * // }
- * // },
- * // forehead: {
- * // x: 126.53813,
- * // y: 93.812057,
- * // z: -18.863352
- * // },
- * // lips: {
- * // bottom: {
- * // x: 137.28528,
- * // y: 219.23564,
- * // z: -56.663128
- * // },
- * // top: {
- * // x: 134.74164,
- * // y: 192.50438,
- * // z: -53.876408
- * // }
- * // },
- * // mouth: {
- * // center: {
- * // x: 136.43481,
- * // y: 204.37952,
- * // z: -51.620205
- * // },
- * // left: {
- * // x: 104.53558,
- * // y: 214.05037,
- * // z: -30.056231
- * // },
- * // right: {
- * // x: 173.79134,
- * // y: 204.99333,
- * // z: -39.725758
- * // }
- * // },
- * // nose: {
- * // bottom: {
- * // center: {
- * // x: 133.81947,
- * // y: 173.16437,
- * // z: -48.287724
- * // },
- * // left: {
- * // x: 110.98372,
- * // y: 173.61331,
- * // z: -29.7784
- * // },
- * // right: {
- * // x: 161.31354,
- * // y: 168.24527,
- * // z: -36.1628
- * // }
- * // },
- * // tip: {
- * // x: 128.14919,
- * // y: 153.68129,
- * // z: -63.198204
- * // },
- * // top: {
- * // x: 127.83745,
- * // y: 110.17557,
- * // z: -22.650913
- * // }
- * // }
- * // },
- * // confidence: 56.748849,
- * // anger: false,
- * // angerLikelihood: 1,
- * // blurred: false,
- * // blurredLikelihood: 1,
- * // headwear: false,
- * // headwearLikelihood: 1,
- * // joy: false,
- * // joyLikelihood: 1,
- * // sorrow: false,
- * // sorrowLikelihood: 1,
- * // surprise: false,
- * // surpriseLikelihood: 1,
- * // underExposed: false,
- * // underExposedLikelihood: 1
- * // }
- * // ]
- * });
- *
- * //-
- * // Our library simplifies the response from the API. Use the map below to see
- * // each response name's original name.
- * //-
- * var shortNameToLongNameMap = {
- * chin: {
- * center: 'CHIN_GNATHION',
- * left: 'CHIN_LEFT_GONION',
- * right: 'CHIN_RIGHT_GONION'
- * },
- *
- * ears: {
- * left: 'LEFT_EAR_TRAGION',
- * right: 'RIGHT_EAR_TRAGION'
- * },
- *
- * eyebrows: {
- * left: {
- * left: 'LEFT_OF_LEFT_EYEBROW',
- * right: 'RIGHT_OF_LEFT_EYEBROW',
- * top: 'LEFT_EYEBROW_UPPER_MIDPOINT'
- * },
- * right: {
- * left: 'LEFT_OF_RIGHT_EYEBROW',
- * right: 'RIGHT_OF_RIGHT_EYEBROW',
- * top: 'RIGHT_EYEBROW_UPPER_MIDPOINT'
- * }
- * },
- *
- * eyes: {
- * left: {
- * bottom: 'LEFT_EYE_BOTTOM_BOUNDARY',
- * center: 'LEFT_EYE',
- * left: 'LEFT_EYE_LEFT_CORNER',
- * pupil: 'LEFT_EYE_PUPIL',
- * right: 'LEFT_EYE_RIGHT_CORNER',
- * top: 'LEFT_EYE_TOP_BOUNDARY'
- * },
- * right: {
- * bottom: 'RIGHT_EYE_BOTTOM_BOUNDARY',
- * center: 'RIGHT_EYE',
- * left: 'RIGHT_EYE_LEFT_CORNER',
- * pupil: 'RIGHT_EYE_PUPIL',
- * right: 'RIGHT_EYE_RIGHT_CORNER',
- * top: 'RIGHT_EYE_TOP_BOUNDARY'
- * }
- * },
- *
- * forehead: 'FOREHEAD_GLABELLA',
- *
- * lips: {
- * bottom: 'LOWER_LIP',
- * top: 'UPPER_LIP'
- * },
- *
- * mouth: {
- * center: 'MOUTH_CENTER',
- * left: 'MOUTH_LEFT',
- * right: 'MOUTH_RIGHT'
- * },
- *
- * nose: {
- * bottom: {
- * center: 'NOSE_BOTTOM_CENTER',
- * left: 'NOSE_BOTTOM_LEFT',
- * right: 'NOSE_BOTTOM_RIGHT'
- * },
- * tip: 'NOSE_TIP',
- * top: 'MIDPOINT_BETWEEN_EYES'
- * }
- * };
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.detectFaces('image.jpg').then(function(data) {
- * var faces = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.detectFaces = function(images, options, callback) {
- if (is.fn(options)) {
- callback = options;
- options = {};
- }
-
- options = extend({}, options, {
- types: ['faces']
- });
-
- this.detect(images, options, callback);
-};
-
-/**
- * Annotate an image with descriptive labels.
- *
- * Parameters
- *
- * See {module:vision#detect}.
- *
- * @resource [EntityAnnotation JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#EntityAnnotation}
- *
- * @example
- * vision.detectLabels('image.jpg', function(err, labels, apiResponse) {
- * // labels = [
- * // 'classical sculpture',
- * // 'statue',
- * // 'landmark',
- * // 'ancient history',
- * // 'artwork'
- * // ]
- * });
- *
- * //-
- * // Activate `verbose` mode for a more detailed response.
- * //-
- * var opts = {
- * verbose: true
- * };
- *
- * vision.detectLabels('image.jpg', opts, function(err, labels, apiResponse) {
- * // labels = [
- * // {
- * // desc: 'classical sculpture',
- * // id: '/m/095yjj',
- * // score: 98.092282
- * // },
- * // {
- * // desc: 'statue',
- * // id: '/m/013_1c',
- * // score: 90.66112
- * // },
- * // // ...
- * // ]
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.detectLabels('image.jpg').then(function(data) {
- * var labels = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.detectLabels = function(images, options, callback) {
- if (is.fn(options)) {
- callback = options;
- options = {};
- }
-
- options = extend({}, options, {
- types: ['labels']
- });
-
- this.detect(images, options, callback);
-};
-
-/**
- * Detect the landmarks from an image.
- *
- * Parameters
- *
- * See {module:vision#detect}.
- *
- * @resource [EntityAnnotation JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#EntityAnnotation}
- *
- * @example
- * vision.detectLandmarks('image.jpg', function(err, landmarks, apiResponse) {
- * // landmarks = [
- * // 'Mount Rushmore'
- * // ]
- * });
- *
- * //-
- * // Activate `verbose` mode for a more detailed response.
- * //-
- * var image = 'image.jpg';
- *
- * var opts = {
- * verbose: true
- * };
- *
- * vision.detectLandmarks(image, opts, function(err, landmarks, apiResponse) {
- * // landmarks = [
- * // {
- * // desc: 'Mount Rushmore',
- * // id: '/m/019dvv',
- * // score: 28.651705,
- * // bounds: [
- * // {
- * // x: 79,
- * // y: 130
- * // },
- * // {
- * // x: 284,
- * // y: 130
- * // },
- * // {
- * // x: 284,
- * // y: 226
- * // },
- * // {
- * // x: 79,
- * // y: 226
- * // }
- * // ],
- * // locations: [
- * // {
- * // latitude: 43.878264,
- * // longitude: -103.45700740814209
- * // }
- * // ]
- * // }
- * // ]
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.detectLandmarks('image.jpg').then(function(data) {
- * var landmarks = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.detectLandmarks = function(images, options, callback) {
- if (is.fn(options)) {
- callback = options;
- options = {};
- }
-
- options = extend({}, options, {
- types: ['landmarks']
- });
-
- this.detect(images, options, callback);
-};
-
-/**
- * Detect the logos from an image.
- *
- * Parameters
- *
- * See {module:vision#detect}.
- *
- * @resource [EntityAnnotation JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#EntityAnnotation}
- *
- * @example
- * vision.detectLogos('image.jpg', function(err, logos, apiResponse) {
- * // logos = [
- * // 'Google'
- * // ]
- * });
- *
- * //-
- * // Activate `verbose` mode for a more detailed response.
- * //-
- * var options = {
- * verbose: true
- * };
- *
- * vision.detectLogos('image.jpg', options, function(err, logos, apiResponse) {
- * // logos = [
- * // {
- * // desc: 'Google',
- * // id: '/m/045c7b',
- * // score: 64.35439,
- * // bounds: [
- * // {
- * // x: 11,
- * // y: 11
- * // },
- * // {
- * // x: 330,
- * // y: 11
- * // },
- * // {
- * // x: 330,
- * // y: 72
- * // },
- * // {
- * // x: 11,
- * // y: 72
- * // }
- * // ]
- * // }
- * // ]
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.detectLogos('image.jpg').then(function(data) {
- * var logos = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.detectLogos = function(images, options, callback) {
- if (is.fn(options)) {
- callback = options;
- options = {};
- }
-
- options = extend({}, options, {
- types: ['logos']
- });
-
- this.detect(images, options, callback);
-};
-
-/**
- * Get a set of properties about an image, such as its dominant colors.
- *
- * Parameters
- *
- * See {module:vision#detect}.
- *
- * @resource [ImageProperties JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#ImageProperties}
- *
- * @example
- * vision.detectProperties('image.jpg', function(err, props, apiResponse) {
- * // props = {
- * // colors: [
- * // '3b3027',
- * // '727d81',
- * // '3f2f22',
- * // '838e92',
- * // '482a16',
- * // '5f4f3c',
- * // '261b14',
- * // 'b39b7f',
- * // '51473f',
- * // '2c1e12'
- * // ]
- * // }
- * });
- *
- * //-
- * // Activate `verbose` mode for a more detailed response.
- * //-
- * var image = 'image.jpg';
- *
- * var options = {
- * verbose: true
- * };
- *
- * vision.detectProperties(image, options, function(err, props, apiResponse) {
- * // props = {
- * // colors: [
- * // {
- * // red: 59,
- * // green: 48,
- * // blue: 39,
- * // score: 26.618013,
- * // coverage: 15.948276,
- * // hex: '3b3027'
- * // },
- * // {
- * // red: 114,
- * // green: 125,
- * // blue: 129,
- * // score: 10.319714,
- * // coverage: 8.3977409,
- * // hex: '727d81'
- * // },
- * // // ...
- * // ]
- * // }
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.detectProperties('image.jpg').then(function(data) {
- * var props = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.detectProperties = function(images, options, callback) {
- if (is.fn(options)) {
- callback = options;
- options = {};
- }
-
- options = extend({}, options, {
- types: ['properties']
- });
-
- this.detect(images, options, callback);
-};
-
-/**
- * Detect the SafeSearch flags from an image.
- *
- * Parameters
- *
- * See {module:vision#detect}.
- *
- * @resource [SafeSearch JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#SafeSearchAnnotation}
- *
- * @example
- * vision.detectSafeSearch('image.jpg', function(err, safeSearch, apiResponse) {
- * // safeSearch = {
- * // adult: false,
- * // medical: false,
- * // spoof: false,
- * // violence: true
- * // }
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.detectSafeSearch('image.jpg').then(function(data) {
- * var safeSearch = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.detectSafeSearch = function(images, options, callback) {
- if (is.fn(options)) {
- callback = options;
- options = {};
- }
-
- options = extend({}, options, {
- types: ['safeSearch']
- });
-
- this.detect(images, options, callback);
-};
-
-/**
- * Detect similar images from the internet.
- *
- * Parameters
- *
- * See {module:vision#detect}.
- *
- * @resource [WebAnnotation JSON representation]{@link https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#WebAnnotation}
- *
- * @example
- * vision.detectSimilar('image.jpg', function(err, images, apiResponse) {
- * // images = [
- * // 'http://www.example.com/most-similar-image',
- * // // ...
- * // 'http://www.example.com/least-similar-image'
- * // ]
- * });
- *
- * //-
- * // Activate `verbose` mode for a more detailed response.
- * //-
- * var opts = {
- * verbose: true
- * };
- *
- * vision.detectSimilar('image.jpg', opts, function(err, similar, apiResponse) {
- * // similar = {
- * // entities: [
- * // 'Logo',
- * // // ...
- * // ],
- * // fullMatches: [
- * // 'http://www.example.com/most-similar-image',
- * // // ...
- * // 'http://www.example.com/least-similar-image'
- * // ],
- * // partialMatches: [
- * // 'http://www.example.com/most-similar-image',
- * // // ...
- * // 'http://www.example.com/least-similar-image'
- * // ],
- * // pages: [
- * // 'http://www.example.com/page-with-most-similar-image',
- * // // ...
- * // 'http://www.example.com/page-with-least-similar-image'
- * // ]
- * // }
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.detectSimilar('image.jpg').then(function(data) {
- * var images = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.detectSimilar = function(images, options, callback) {
- if (is.fn(options)) {
- callback = options;
- options = {};
- }
-
- options = extend({}, options, {
- types: ['similar']
- });
-
- this.detect(images, options, callback);
-};
-
-/**
- * Detect the text within an image.
- *
- * Parameters
- *
- * See {module:vision#detect}.
- *
- * @example
- * vision.detectText('image.jpg', function(err, text, apiResponse) {
- * // text = [
- * // 'This was text found in the image'
- * // ]
- * });
- *
- * //-
- * // Activate `verbose` mode for a more detailed response.
- * //-
- * var options = {
- * verbose: true
- * };
- *
- * vision.detectText('image.jpg', options, function(err, text, apiResponse) {
- * // text = [
- * // {
- * // desc: 'This was text found in the image',
- * // bounds: [
- * // {
- * // x: 4,
- * // y: 5
- * // },
- * // {
- * // x: 493,
- * // y: 5
- * // },
- * // {
- * // x: 493,
- * // y: 89
- * // },
- * // {
- * // x: 4,
- * // y: 89
- * // }
- * // ]
- * // }
- * // ]
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.detectText('image.jpg').then(function(data) {
- * var text = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.detectText = function(images, options, callback) {
- if (is.fn(options)) {
- callback = options;
- options = {};
- }
-
- options = extend({}, options, {
- types: ['text']
- });
-
- this.detect(images, options, callback);
-};
-
-/**
- * Annotate a document.
- *
- * Parameters
- *
- * See {module:vision#detect}.
- *
- * @resource [FullTextAnnotation JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#FullTextAnnotation}
- *
- * @example
- * vision.readDocument('image.jpg', function(err, text, apiResponse) {
- * // text = 'This paragraph was extracted from image.jpg';
- * });
- *
- * //-
- * // Activate `verbose` mode for a more detailed response.
- * //-
- * var opts = {
- * verbose: true
- * };
- *
- * vision.readDocument('image.jpg', opts, function(err, pages, apiResponse) {
- * // pages = [
- * // {
- * // languages: [
- * // 'en'
- * // ],
- * // width: 688,
- * // height: 1096,
- * // blocks: [
- * // {
- * // type: 'TEXT',
- * // bounds: [
- * // {
- * // x: 4,
- * // y: 5
- * // },
- * // {
- * // x: 493,
- * // y: 5
- * // },
- * // {
- * // x: 493,
- * // y: 89
- * // },
- * // {
- * // x: 4,
- * // y: 89
- * // }
- * // ],
- * // paragraphs: [
- * // {
- * // bounds: [
- * // {
- * // x: 4,
- * // y: 5
- * // },
- * // {
- * // x: 493,
- * // y: 5
- * // },
- * // {
- * // x: 493,
- * // y: 89
- * // },
- * // {
- * // x: 4,
- * // y: 89
- * // }
- * // ],
- * // words: [
- * // {
- * // bounds: [
- * // {
- * // x: 4,
- * // y: 5
- * // },
- * // {
- * // x: 493,
- * // y: 5
- * // },
- * // {
- * // x: 493,
- * // y: 89
- * // },
- * // {
- * // x: 4,
- * // y: 89
- * // }
- * // ],
- * // symbols: [
- * // {
- * // bounds: [
- * // {
- * // x: 4,
- * // y: 5
- * // },
- * // {
- * // x: 493,
- * // y: 5
- * // },
- * // {
- * // x: 493,
- * // y: 89
- * // },
- * // {
- * // x: 4,
- * // y: 89
- * // }
- * // ],
- * // text: 'T'
- * // },
- * // // ...
- * // ]
- * // },
- * // // ...
- * // ]
- * // },
- * // // ...
- * // ]
- * // },
- * // // ...
- * // ]
- * // }
- * // ]
- * });
- *
- * //-
- * // If the callback is omitted, we'll return a Promise.
- * //-
- * vision.readDocument('image.jpg').then(function(data) {
- * var pages = data[0];
- * var apiResponse = data[1];
- * });
- */
-Vision.prototype.readDocument = function(images, options, callback) {
- if (is.fn(options)) {
- callback = options;
- options = {};
- }
-
- options = extend({}, options, {
- types: ['document']
- });
-
- this.detect(images, options, callback);
-};
-
-/**
- * Determine the type of image the user is asking to be annotated. If a
- * {module:storage/file}, convert to its "gs://{bucket}/{file}" URL. If a remote
- * URL, format as the API expects. If a file path to a local file, convert to a
- * base64 string.
- *
- * @private
- */
-Vision.findImages_ = function(images, callback) {
- if (global.GCLOUD_SANDBOX_ENV) {
- callback(null, [
- {
- content: new Buffer('')
- }
- ]);
- return;
- }
-
- var MAX_PARALLEL_LIMIT = 5;
- images = arrify(images);
-
- function findImage(image, callback) {
- if (Buffer.isBuffer(image)) {
- callback(null, {
- content: image.toString('base64')
- });
- return;
- }
-
- if (common.util.isCustomType(image, 'storage/file')) {
- callback(null, {
- source: {
- gcsImageUri: format('gs://{bucketName}/{fileName}', {
- bucketName: image.bucket.name,
- fileName: image.name
- })
- }
- });
- return;
- }
-
- // File is a URL.
- if (/^http/.test(image)) {
- callback(null, {
- source: {
- imageUri: image
- }
- });
- return;
- }
-
- // File exists on disk.
- fs.readFile(image, { encoding: 'base64' }, function(err, contents) {
- if (err) {
- callback(err);
- return;
- }
-
- callback(null, { content: contents });
- });
- }
-
- async.mapLimit(images, MAX_PARALLEL_LIMIT, findImage, callback);
-};
-
-/**
- * Format a raw crop hint annotation response from the API.
- *
- * @private
- */
-Vision.formatCropHintsAnnotation_ = function(cropHintsAnnotation, options) {
- return cropHintsAnnotation.cropHints.map(function(cropHint) {
- cropHint = {
- bounds: cropHint.boundingPoly.vertices,
- confidence: cropHint.confidence
- };
-
- return options.verbose ? cropHint : cropHint.bounds;
- });
-};
-
-/**
- * Format a raw entity annotation response from the API.
- *
- * @private
- */
-Vision.formatEntityAnnotation_ = function(entityAnnotation, options) {
- if (!options.verbose) {
- return entityAnnotation.description;
- }
-
- var formattedEntityAnnotation = {
- desc: entityAnnotation.description
- };
-
- if (entityAnnotation.mid) {
- formattedEntityAnnotation.mid = entityAnnotation.mid;
- }
-
- if (entityAnnotation.score) {
- formattedEntityAnnotation.score = entityAnnotation.score * 100;
- }
-
- if (entityAnnotation.boundingPoly) {
- formattedEntityAnnotation.bounds = entityAnnotation.boundingPoly.vertices;
- }
-
- if (is.defined(entityAnnotation.confidence)) {
- formattedEntityAnnotation.confidence = entityAnnotation.confidence * 100;
- }
-
- if (entityAnnotation.locations) {
- var locations = entityAnnotation.locations;
- formattedEntityAnnotation.locations = locations.map(prop('latLng'));
- }
-
- if (entityAnnotation.properties) {
- formattedEntityAnnotation.properties = entityAnnotation.properties;
- }
-
- return formattedEntityAnnotation;
-};
-
-/**
- * Format a raw error from the API.
- *
- * @private
- */
-Vision.formatError_ = function(err) {
- var httpError = commonGrpc.Service.GRPC_ERROR_CODE_TO_HTTP[err.code];
-
- if (httpError) {
- err.code = httpError.code;
- }
-
- delete err.details;
-
- return err;
-};
-
-/**
- * Format a raw face annotation response from the API.
- *
- * @private
- */
-Vision.formatFaceAnnotation_ = function(faceAnnotation) {
- function findLandmark(type) {
- var landmarks = faceAnnotation.landmarks;
-
- return landmarks.filter(function(landmark) {
- return landmark.type === type;
- })[0].position;
- }
-
- var formattedFaceAnnotation = {
- angles: {
- pan: faceAnnotation.panAngle,
- roll: faceAnnotation.rollAngle,
- tilt: faceAnnotation.tiltAngle
- },
-
- bounds: {
- head: faceAnnotation.boundingPoly.vertices,
- face: faceAnnotation.fdBoundingPoly.vertices
- },
-
- features: {
- confidence: faceAnnotation.landmarkingConfidence * 100,
- chin: {
- center: findLandmark('CHIN_GNATHION'),
- left: findLandmark('CHIN_LEFT_GONION'),
- right: findLandmark('CHIN_RIGHT_GONION')
- },
- ears: {
- left: findLandmark('LEFT_EAR_TRAGION'),
- right: findLandmark('RIGHT_EAR_TRAGION'),
- },
- eyebrows: {
- left: {
- left: findLandmark('LEFT_OF_LEFT_EYEBROW'),
- right: findLandmark('RIGHT_OF_LEFT_EYEBROW'),
- top: findLandmark('LEFT_EYEBROW_UPPER_MIDPOINT')
- },
- right: {
- left: findLandmark('LEFT_OF_RIGHT_EYEBROW'),
- right: findLandmark('RIGHT_OF_RIGHT_EYEBROW'),
- top: findLandmark('RIGHT_EYEBROW_UPPER_MIDPOINT')
- }
- },
- eyes: {
- left: {
- bottom: findLandmark('LEFT_EYE_BOTTOM_BOUNDARY'),
- center: findLandmark('LEFT_EYE'),
- left: findLandmark('LEFT_EYE_LEFT_CORNER'),
- pupil: findLandmark('LEFT_EYE_PUPIL'),
- right: findLandmark('LEFT_EYE_RIGHT_CORNER'),
- top: findLandmark('LEFT_EYE_TOP_BOUNDARY')
- },
- right: {
- bottom: findLandmark('RIGHT_EYE_BOTTOM_BOUNDARY'),
- center: findLandmark('RIGHT_EYE'),
- left: findLandmark('RIGHT_EYE_LEFT_CORNER'),
- pupil: findLandmark('RIGHT_EYE_PUPIL'),
- right: findLandmark('RIGHT_EYE_RIGHT_CORNER'),
- top: findLandmark('RIGHT_EYE_TOP_BOUNDARY')
- }
- },
- forehead: findLandmark('FOREHEAD_GLABELLA'),
- lips: {
- bottom: findLandmark('LOWER_LIP'),
- top: findLandmark('UPPER_LIP')
- },
- mouth: {
- center: findLandmark('MOUTH_CENTER'),
- left: findLandmark('MOUTH_LEFT'),
- right: findLandmark('MOUTH_RIGHT')
- },
- nose: {
- bottom: {
- center: findLandmark('NOSE_BOTTOM_CENTER'),
- left: findLandmark('NOSE_BOTTOM_LEFT'),
- right: findLandmark('NOSE_BOTTOM_RIGHT')
- },
- tip: findLandmark('NOSE_TIP'),
- top: findLandmark('MIDPOINT_BETWEEN_EYES')
- }
- },
-
- confidence: faceAnnotation.detectionConfidence * 100
- };
-
- // Remove the `Likelihood` part from a property name.
- // input: "joyLikelihood", output: "joy"
- for (var prop in faceAnnotation) {
- if (prop.indexOf('Likelihood') > -1) {
- var shortenedProp = prop.replace('Likelihood', '');
-
- formattedFaceAnnotation[shortenedProp] =
- Vision.gteLikelihood_(LIKELY, faceAnnotation[prop]);
-
- formattedFaceAnnotation[prop] = Vision.likelihood[faceAnnotation[prop]];
- }
- }
-
- return formattedFaceAnnotation;
-};
-
-/**
- * Format a raw full text annotation response from the API.
- *
- * @private
- */
-Vision.formatFullTextAnnotation_ = function(fullTextAnnotation, options) {
- if (!options.verbose) {
- return fullTextAnnotation.text;
- }
-
- return fullTextAnnotation.pages
- .map(function(page) {
- return {
- languages: page.property.detectedLanguages.map(prop('languageCode')),
- width: page.width,
- height: page.height,
- blocks: page.blocks.map(function(block) {
- return {
- type: block.blockType,
- bounds: block.boundingBox && block.boundingBox.vertices || [],
- paragraphs: arrify(block.paragraphs)
- .map(function(paragraph) {
- return {
- bounds: paragraph.boundingBox.vertices,
- words: paragraph.words.map(function(word) {
- return {
- bounds: word.boundingBox.vertices,
- symbols: word.symbols.map(function(symbol) {
- return {
- bounds: symbol.boundingBox.vertices,
- text: symbol.text
- };
- })
- };
- })
- };
- })
- };
- })
- };
- });
-};
-
-/**
- * Format a raw image properties annotation response from the API.
- *
- * @private
- */
-Vision.formatImagePropertiesAnnotation_ = function(imageAnnotation, options) {
- var formattedImageAnnotation = {
- colors: imageAnnotation.dominantColors.colors
- .map(function(colorObject) {
- var red = colorObject.color.red;
- var green = colorObject.color.green;
- var blue = colorObject.color.blue;
-
- var hex = rgbHex(red, green, blue);
-
- if (!options.verbose) {
- return hex;
- }
-
- colorObject.hex = hex;
-
- colorObject.red = red;
- colorObject.green = green;
- colorObject.blue = blue;
- delete colorObject.color;
-
- colorObject.coverage = colorObject.pixelFraction *= 100;
- delete colorObject.pixelFraction;
-
- colorObject.score *= 100;
-
- return colorObject;
- })
- };
-
- return formattedImageAnnotation;
-};
-
-/**
- * Format a raw SafeSearch annotation response from the API.
- *
- * @private
- */
-Vision.formatSafeSearchAnnotation_ = function(ssAnnotation, options) {
- if (!options.verbose) {
- for (var prop in ssAnnotation) {
- var value = ssAnnotation[prop];
- ssAnnotation[prop] = Vision.gteLikelihood_(LIKELY, value);
- }
- return ssAnnotation;
- }
-
- return ssAnnotation;
-};
-
-/**
- * Format a raw web detection response from the API.
- *
- * @private
- */
-Vision.formatWebDetection_ = function(webDetection, options) {
- function sortByScore(a, b) {
- return a.score < b.score ? 1 : a.score > b.score ? -1 : 0;
- }
-
- var formattedWebDetection = {
- entities: arrify(webDetection.webEntities).map(prop('description')),
-
- fullMatches: arrify(webDetection.fullMatchingImages)
- .sort(sortByScore)
- .map(prop('url')),
-
- partialMatches: arrify(webDetection.partialMatchingImages)
- .sort(sortByScore)
- .map(prop('url')),
-
- pages: arrify(webDetection.pagesWithMatchingImages)
- .sort(sortByScore)
- .map(prop('url'))
- };
-
- if (!options.verbose) {
- // Combine all matches.
- formattedWebDetection = formattedWebDetection.fullMatches
- .concat(formattedWebDetection.partialMatches);
- }
-
- return formattedWebDetection;
-};
-
-/**
- * Convert a "likelihood" value to a boolean representation, based on the lowest
- * likelihood provided.
- *
- * @private
- *
- * @example
- * Vision.gteLikelihood_(Vision.likelihood.VERY_LIKELY, 'POSSIBLE');
- * // false
- *
- * Vision.gteLikelihood_(Vision.likelihood.UNLIKELY, 'POSSIBLE');
- * // true
- */
-Vision.gteLikelihood_ = function(baseLikelihood, likelihood) {
- return Vision.likelihood[likelihood] >= baseLikelihood;
-};
-
-/*! Developer Documentation
- *
- * All async methods (except for streams) will return a Promise in the event
- * that a callback is omitted.
- */
-common.util.promisifyAll(Vision);
-module.exports = Vision;
-module.exports.v1 = v1;
+module.exports = visionV1;
+module.exports.v1 = visionV1;
diff --git a/packages/vision/system-test/vision.js b/packages/vision/system-test/vision.js
index 515f7dec4dc..5e6e448d1d4 100644
--- a/packages/vision/system-test/vision.js
+++ b/packages/vision/system-test/vision.js
@@ -19,9 +19,6 @@
var assert = require('assert');
var async = require('async');
var fs = require('fs');
-var is = require('is');
-var multiline = require('multiline');
-var normalizeNewline = require('normalize-newline');
var path = require('path');
var Storage = require('@google-cloud/storage');
var uuid = require('node-uuid');
@@ -44,7 +41,6 @@ describe('Vision', function() {
var vision = new Vision(env);
var bucket = storage.bucket(generateName());
- var file = bucket.file('logo.jpg');
before(function(done) {
bucket.create(function(err) {
@@ -81,555 +77,48 @@ describe('Vision', function() {
});
});
- it('should detect from a URL', function(done) {
+ it('should detect from a URL', () => {
var url = 'https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png';
-
- vision.detect(url, ['logos'], function(err, logos) {
- assert.ifError(err);
-
- assert.deepEqual(logos, ['Google']);
-
- done();
+ return vision.logoDetection({
+ source: {imageUri: url},
+ }).then(responses => {
+ var response = responses[0];
+ assert.deepEqual(response.logoAnnotations[0].description, 'Google');
});
});
- it('should detect from a File', function(done) {
- vision.detect(file, ['logos'], function(err, logos) {
- assert.ifError(err);
-
- assert.deepEqual(logos, ['Google']);
-
- done();
+ it('should detect from a filename', () => {
+ return vision.logoDetection({
+ source: {filename: IMAGES.logo},
+ }).then(responses => {
+ var response = responses[0];
+ assert.deepEqual(response.logoAnnotations[0].description, 'Google');
});
});
- it('should detect from a Buffer', function(done) {
+ it('should detect from a Buffer', () => {
var buffer = fs.readFileSync(IMAGES.logo);
- vision.detect(buffer, ['logos'], function(err, logos) {
- assert.ifError(err);
-
- assert.deepEqual(logos, ['Google']);
-
- done();
- });
- });
-
- describe('single image', function() {
- var TYPES = ['faces', 'labels', 'safeSearch'];
-
- it('should perform a single detection', function(done) {
- vision.detect(IMAGES.rushmore, TYPES[0], function(err, detections) {
- assert.ifError(err);
-
- assert(is.array(detections));
-
- done();
- });
- });
-
- it('should perform multiple detections', function(done) {
- vision.detect(IMAGES.rushmore, TYPES, function(err, detections) {
- assert.ifError(err);
-
- assert(is.array(detections.faces));
- assert(is.array(detections.labels));
- assert(is.object(detections.safeSearch));
-
- done();
- });
- });
-
- it('should return errors', function(done) {
- vision.detect(IMAGES.malformed, TYPES, function(err, detections) {
- assert.strictEqual(err.name, 'PartialFailureError');
- assert(is.array(err.errors));
- assert.strictEqual(err.errors.length, 1);
-
- assert.deepEqual(detections, []);
- done();
- });
- });
- });
-
- describe('multiple images', function() {
- var TYPES = ['faces', 'labels', 'safeSearch'];
-
- it('should perform a single detection', function(done) {
- var images = [IMAGES.logo, IMAGES.rushmore];
-
- vision.detect(images, TYPES[0], function(err, detections) {
- assert.ifError(err);
-
- var image1detections = detections[0];
- var image2detections = detections[1];
-
- assert(is.array(image1detections));
- assert(is.array(image2detections));
-
- done();
- });
- });
-
- it('should perform multiple detections', function(done) {
- var images = [IMAGES.logo, IMAGES.rushmore];
-
- vision.detect(images, TYPES, function(err, detections) {
- assert.ifError(err);
-
- var image1detections = detections[0];
- var image2detections = detections[1];
-
- assert(is.array(image1detections.faces));
- assert(is.array(image1detections.labels));
- assert(is.object(image1detections.safeSearch));
-
- assert(is.array(image2detections.faces));
- assert(is.array(image2detections.labels));
- assert(is.object(image2detections.safeSearch));
-
- done();
- });
- });
-
- it('should return errors', function(done) {
- var images = [IMAGES.logo, IMAGES.malformed];
-
- vision.detect(images, TYPES, function(err, detections) {
- assert.strictEqual(err.name, 'PartialFailureError');
- assert(is.array(err.errors));
- assert.strictEqual(err.errors.length, 1);
-
- var image2errors = err.errors[0];
- assert.deepEqual(image2errors, {
- image: IMAGES.malformed,
- errors: [
- {
- code: 400,
- message:
- 'image-annotator::Bad image data.: Image processing error!',
- type: 'faces'
- },
- {
- code: 400,
- message:
- 'image-annotator::Bad image data.: Image processing error!',
- type: 'labels'
- },
- {
- code: 500,
- message: 'image-annotator::error(12): Image processing error!',
- type: 'safeSearch'
- }
- ]
- });
-
- var image1detections = detections[0];
- assert(is.array(image1detections.faces));
- assert(is.array(image1detections.labels));
- assert(is.object(image1detections.safeSearch));
-
- var image2detections = detections[1];
- assert.deepEqual(image2detections, {});
-
- done();
- });
- });
- });
-
- describe('crops', function() {
- it('should detect crops from an image', function(done) {
- vision.detectCrops(IMAGES.logo, function(err, crops) {
- assert.ifError(err);
- assert.strictEqual(crops.length, 1);
- assert.strictEqual(crops[0].length, 4);
- done();
- });
- });
-
- it('should detect crops from multiple images', function(done) {
- vision.detectCrops([
- IMAGES.logo,
- IMAGES.rushmore
- ], function(err, crops) {
- assert.ifError(err);
-
- assert.strictEqual(crops.length, 2);
- assert.strictEqual(crops[0][0].length, 4);
- assert.strictEqual(crops[1][0].length, 4);
-
- done();
- });
- });
- });
-
- describe('documents', function() {
- it('should detect text from a document', function(done) {
- vision.readDocument(IMAGES.document, function(err, text) {
- assert.ifError(err);
-
- assert.strictEqual(typeof text, 'string');
-
- done();
- });
- });
-
- it('should detect pages from multiple documents', function(done) {
- vision.readDocument([
- IMAGES.document,
- IMAGES.logo
- ], function(err, pages) {
- assert.ifError(err);
-
- assert.strictEqual(pages.length, 2);
- assert(typeof pages[0], 'object');
- assert(typeof pages[1], 'object');
-
- done();
- });
- });
- });
-
- describe('faces', function() {
- it('should detect faces from an image', function(done) {
- vision.detectFaces(IMAGES.rushmore, function(err, faces) {
- assert.ifError(err);
-
- assert.strictEqual(faces.length, 1);
-
- done();
- });
- });
-
- it('should detect faces from multiple images', function(done) {
- vision.detectFaces([
- IMAGES.logo,
- IMAGES.rushmore
- ], function(err, faces) {
- assert.ifError(err);
-
- assert.strictEqual(faces.length, 2);
- assert.strictEqual(faces[0].length, 0);
- assert.strictEqual(faces[1].length, 1);
-
- done();
- });
- });
- });
-
- describe('labels', function() {
- it('should detect labels', function(done) {
- vision.detectLabels(IMAGES.rushmore, function(err, labels) {
- assert.ifError(err);
-
- assert(labels.length > -1);
-
- done();
- });
- });
-
- it('should detect labels from multiple images', function(done) {
- vision.detectLabels([
- IMAGES.logo,
- IMAGES.rushmore
- ], function(err, labels) {
- assert.ifError(err);
-
- assert.strictEqual(labels.length, 2);
-
- assert(labels[0].length > -1);
- assert(labels[1].length > -1);
-
- done();
- });
- });
-
- it('should support verbose mode', function(done) {
- var options = {
- verbose: true
- };
-
- vision.detectLabels(IMAGES.rushmore, options, function(err, labels) {
- assert.ifError(err);
-
- assert(is.defined(labels[0].mid));
-
- done();
- });
- });
- });
-
- describe('landmarks', function() {
- it('should detect landmarks from an image', function(done) {
- vision.detectLandmarks(IMAGES.rushmore, function(err, landmarks) {
- assert.ifError(err);
-
- assert.deepEqual(landmarks, ['Mount Rushmore']);
-
- done();
- });
- });
-
- it('should detect landmarks from multiple images', function(done) {
- vision.detectLandmarks([
- IMAGES.logo,
- IMAGES.rushmore
- ], function(err, landmarks) {
- assert.ifError(err);
-
- assert.strictEqual(landmarks.length, 2);
-
- assert.deepEqual(landmarks[0], []);
- assert.deepEqual(landmarks[1], ['Mount Rushmore']);
-
- done();
- });
- });
-
- it('should support verbose mode', function(done) {
- var opts = {
- verbose: true
- };
-
- vision.detectLandmarks(IMAGES.rushmore, opts, function(err, landmarks) {
- assert.ifError(err);
-
- assert(is.defined(landmarks[0].mid));
-
- done();
- });
- });
- });
-
- describe('logos', function() {
- it('should detect logos from an image', function(done) {
- vision.detectLogos(IMAGES.logo, function(err, logos) {
- assert.ifError(err);
-
- assert.deepEqual(logos, ['Google']);
-
- done();
- });
- });
-
- it('should detect logos from multiple images', function(done) {
- vision.detectLogos([
- IMAGES.rushmore,
- IMAGES.logo
- ], function(err, logos) {
- assert.ifError(err);
-
- assert.strictEqual(logos.length, 2);
-
- assert.deepEqual(logos[0], []);
- assert.deepEqual(logos[1], ['Google']);
-
- done();
- });
- });
-
- it('should support verbose mode', function(done) {
- var options = {
- verbose: true
- };
-
- vision.detectLogos(IMAGES.logo, options, function(err, logos) {
- assert.ifError(err);
-
- assert(is.defined(logos[0].mid));
-
- done();
- });
+ return vision.logoDetection(buffer).then(responses => {
+ var response = responses[0];
+ assert.deepEqual(response.logoAnnotations[0].description, 'Google');
});
});
- describe('properties', function() {
- it('should detect image properties', function(done) {
- vision.detectProperties(IMAGES.rushmore, function(err, properties) {
- assert.ifError(err);
-
- assert.deepEqual(properties.colors, [
- '3b3027',
- '727d81',
- '3f3022',
- '838e92',
- '482b17',
- '5f4e3d',
- '261c14',
- 'b29a7f',
- '51473e',
- '2c1e12'
- ]);
-
- done();
- });
- });
-
- it('should detect image properties from multiple images', function(done) {
- vision.detectProperties([
- IMAGES.logo,
- IMAGES.rushmore
- ], function(err, properties) {
- assert.ifError(err);
-
- assert.strictEqual(properties.length, 2);
- assert(is.array(properties[0].colors));
- assert(is.array(properties[1].colors));
-
- done();
- });
- });
-
- it('should support verbose mode', function(done) {
- var options = {
- verbose: true
- };
-
- vision.detectProperties(IMAGES.rushmore, options, function(err, props) {
- assert.ifError(err);
-
- assert(is.object(props.colors[0]));
-
- done();
- });
- });
- });
-
- describe('SafeSearch', function() {
- it('should detect SafeSearch', function(done) {
- vision.detectSafeSearch(IMAGES.rushmore, function(err, safesearch) {
- assert.ifError(err);
-
- assert.deepEqual(safesearch, {
- adult: false,
- medical: false,
- spoof: false,
- violence: false
- });
-
- done();
- });
- });
-
- it('should detect SafeSearch from multiple images', function(done) {
- vision.detectSafeSearch([
- IMAGES.logo,
- IMAGES.rushmore
- ], function(err, safesearches) {
- assert.ifError(err);
-
- assert.strictEqual(safesearches.length, 2);
- assert.deepEqual(safesearches[0], {
- adult: false,
- medical: false,
- spoof: false,
- violence: false
- });
- assert.deepEqual(safesearches[1], {
- adult: false,
- medical: false,
- spoof: false,
- violence: false
- });
-
- done();
- });
- });
-
- it('should support verbose mode', function(done) {
- var options = {
- verbose: true
- };
-
- vision.detectSafeSearch(IMAGES.rushmore, options, function(err, ss) {
- assert.ifError(err);
-
- assert(!is.boolean(ss.adult));
-
- done();
- });
- });
- });
-
- describe('similar', function() {
- it('should detect similar images from the internet', function(done) {
- vision.detectSimilar(IMAGES.logo, function(err, images) {
- assert.ifError(err);
- assert(images.length > -1);
- done();
- });
- });
-
- it('should detect similar images from multiple images', function(done) {
- vision.detectSimilar([
- IMAGES.logo,
- IMAGES.rushmore
- ], function(err, images) {
- assert.ifError(err);
-
- assert.strictEqual(images.length, 2);
-
- assert(images[0].length > -1);
- assert(images[1].length > -1);
-
- done();
- });
- });
- });
-
- describe('text', function() {
- var expectedResults = [
- normalizeNewline(multiline.stripIndent(function() {/*
- Google Cloud Client Library for Node js an idiomatic, intuitive, and
- natural way for Node.js developers to integrate with Google Cloud
- Platform services, like Cloud Datastore and Cloud Storage.
-
- */}))
+ describe('single image', () => {
+ var TYPES = [
+ {type: 'FACE_DETECTION'},
+ {type: 'LABEL_DETECTION'},
+ {type: 'SAFE_SEARCH_DETECTION'},
];
-
- expectedResults = expectedResults.concat(
- expectedResults[0]
- .replace(/\n/g, ' ')
- .trim()
- .split(' ')
- );
-
- it('should detect text', function(done) {
- vision.detectText(IMAGES.text, function(err, text) {
- assert.ifError(err);
-
- assert.deepEqual(text, expectedResults);
-
- done();
- });
- });
-
- it('should detect text from multiple images', function(done) {
- vision.detectText([
- IMAGES.rushmore,
- IMAGES.text
- ], function(err, texts) {
- assert.ifError(err);
-
- assert.strictEqual(texts.length, 2);
-
- assert.deepEqual(texts[0], []);
- assert.deepEqual(texts[1], expectedResults);
-
- done();
- });
- });
-
- it('should support verbose mode', function(done) {
- var options = {
- verbose: true
- };
-
- vision.detectText(IMAGES.text, options, function(err, text) {
- assert.ifError(err);
-
- assert(is.defined(text[0].bounds));
-
- done();
+ it('should perform multiple detections', () => {
+ return vision.annotateImage({
+ features: TYPES,
+ image: {source: {filename: IMAGES.rushmore}},
+ }).then(responses => {
+ var response = responses[0];
+ assert(response.faceAnnotations.length >= 1);
+ assert(response.labelAnnotations.length >= 1);
+ assert(response.safeSearchAnnotation !== null);
});
});
});
diff --git a/packages/vision/test/gapic-v1.js b/packages/vision/test/gapic-v1.js
new file mode 100644
index 00000000000..9c917eb69f5
--- /dev/null
+++ b/packages/vision/test/gapic-v1.js
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2016 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var assert = require('assert');
+var visionV1 = require('../src/v1')();
+
+var FAKE_STATUS_CODE = 1;
+var error = new Error();
+error.code = FAKE_STATUS_CODE;
+
+describe('ImageAnnotatorClient', function() {
+ describe('batchAnnotateImages', function() {
+ it('invokes batchAnnotateImages without error', function(done) {
+ var client = visionV1.imageAnnotatorClient();
+ // Mock request
+ var requests = [];
+ var request = {
+ requests : requests
+ };
+
+ // Mock response
+ var expectedResponse = {};
+
+ // Mock Grpc layer
+ client._batchAnnotateImages = mockSimpleGrpcMethod(request, expectedResponse);
+
+ client.batchAnnotateImages(request, function(err, response) {
+ assert.ifError(err);
+ assert.deepStrictEqual(response, expectedResponse);
+ done();
+ });
+ });
+
+ it('invokes batchAnnotateImages with error', function(done) {
+ var client = visionV1.imageAnnotatorClient();
+ // Mock request
+ var requests = [];
+ var request = {
+ requests : requests
+ };
+
+ // Mock Grpc layer
+ client._batchAnnotateImages = mockSimpleGrpcMethod(request, null, error);
+
+ client.batchAnnotateImages(request, function(err, response) {
+ assert(err instanceof Error);
+ assert.equal(err.code, FAKE_STATUS_CODE);
+ done();
+ });
+ });
+ });
+
+});
+
+function mockSimpleGrpcMethod(expectedRequest, response, error) {
+ return function(actualRequest, options, callback) {
+ assert.deepStrictEqual(actualRequest, expectedRequest);
+ if (error) {
+ callback(error);
+ } else if (response) {
+ callback(null, response);
+ } else {
+ callback(null);
+ }
+ };
+}
diff --git a/packages/vision/test/helpers.test.js b/packages/vision/test/helpers.test.js
new file mode 100644
index 00000000000..c740193bfe7
--- /dev/null
+++ b/packages/vision/test/helpers.test.js
@@ -0,0 +1,283 @@
+/*!
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+var assert = require('assert');
+var fs = require('fs');
+var is = require('is');
+var sinon = require('sinon');
+
+var Vision = require('../');
+
+
+describe('Vision helper methods', () => {
+ var sandbox = sinon.sandbox.create();
+
+ afterEach(() => {
+ sandbox.restore();
+ });
+
+ describe('annotateImage', () => {
+ it('calls batchAnnotateImages correctly', () => {
+ var vision = Vision.v1();
+ var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages');
+ batchAnnotate.callsArgWith(2, undefined, {responses: [{
+ logoAnnotations: [{description: 'Google'}],
+ }]});
+
+ // Ensure that the annotateImage method arrifies the request and
+ // passes it through to the batch annotation method.
+ var request = {
+ image: {content: new Buffer('bogus==')},
+ features: {type: ['LOGO_DETECTION']},
+ };
+ return vision.annotateImage(request).then(r => {
+ var response = r[0];
+
+ // Ensure that we got the slice of the response that we expected.
+ assert.deepEqual(response, {
+ logoAnnotations: [{description: 'Google'}],
+ });
+
+ // Inspect the calls to batchAnnotateImages and ensure they matched
+ // the expected signature.
+ assert(batchAnnotate.callCount === 1);
+ assert(batchAnnotate.calledWith([request]));
+ });
+ });
+
+ it('understands buffers', () => {
+ var vision = Vision.v1();
+
+ // Stub out the batch annotation method.
+ var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages');
+ batchAnnotate.callsArgWith(2, undefined, {responses: [{
+ logoAnnotations: [{description: 'Google'}],
+ }]});
+
+ // Ensure that the annotateImage method arrifies the request and
+ // passes it through to the batch annotation method.
+ var request = {
+ image: new Buffer('fakeImage'),
+ features: {type: ['LOGO_DETECTION']},
+ };
+ return vision.annotateImage(request).then(r => {
+ var response = r[0];
+
+ // Ensure that we got the slice of the response that we expected.
+ assert.deepEqual(response, {
+ logoAnnotations: [{description: 'Google'}],
+ });
+
+ // Inspect the calls to batchAnnotateImages and ensure they matched
+ // the expected signature.
+ assert(batchAnnotate.callCount === 1);
+ assert.deepEqual(request, {
+ image: {content: 'ZmFrZUltYWdl'},
+ features: {type: ['LOGO_DETECTION']},
+ });
+ assert(batchAnnotate.calledWith([request]));
+ });
+ });
+
+ it('understands filenames', () => {
+ var vision = Vision.v1();
+
+ // Stub out `fs.readFile` and return a bogus image object.
+ // This allows us to test filename detection.
+ var readFile = sandbox.stub(fs, 'readFile');
+ readFile.withArgs('image.jpg').callsArgWith(2, null,
+ new Buffer('fakeImage')
+ );
+ readFile.callThrough();
+
+ // Stub out the batch annotation method as before.
+ var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages');
+ batchAnnotate.callsArgWith(2, undefined, {responses: [{
+ logoAnnotations: [{description: 'Google'}],
+ }]});
+
+ // Ensure that the annotateImage method arrifies the request and
+ // passes it through to the batch annotation method.
+ var request = {
+ image: {source: {filename: 'image.jpg'}},
+ features: {type: ['LOGO_DETECTION']},
+ };
+ return vision.annotateImage(request).then(r => {
+ var response = r[0];
+
+ // Ensure that we got the slice of the response that we expected.
+ assert.deepEqual(response, {
+ logoAnnotations: [{description: 'Google'}],
+ });
+
+ // Inspect the calls to `readFile` to ensure that they matched
+ // the expected signature.
+ assert(readFile.callCount === 1);
+ assert(readFile.calledWith('image.jpg'));
+
+ // Inspect the calls to batchAnnotateImages and ensure they matched
+ // the expected signature.
+ assert(batchAnnotate.callCount === 1);
+ assert.deepEqual(request, {
+ image: {content: 'ZmFrZUltYWdl'},
+ features: {type: ['LOGO_DETECTION']},
+ });
+ assert(batchAnnotate.calledWith([request]));
+ });
+ });
+
+ it('propagates the error if a file is not found', () => {
+ var vision = Vision.v1();
+
+      // Stub out `fs.readFile` and make it yield an error for the requested
+      // file, so we can verify that the read error is propagated.
+ var readFile = sandbox.stub(fs, 'readFile');
+ readFile.withArgs('image.jpg').callsArgWith(2, {error: 404});
+ readFile.callThrough();
+
+      // Build a request that references the unreadable file and ensure the
+      // resulting error surfaces through the promise annotateImage returns.
+ var request = {
+ image: {source: {filename: 'image.jpg'}},
+ features: {type: ['LOGO_DETECTION']},
+ };
+ return vision.annotateImage(request).then(assert.fail).catch(err => {
+ assert.deepEqual(err, {error: 404});
+ });
+ });
+
+ it('retains call options sent', () => {
+ var vision = Vision.v1();
+ var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages');
+ batchAnnotate.callsArgWith(2, undefined, {responses: [{
+ logoAnnotations: [{description: 'Google'}],
+ }]});
+
+      // Ensure that call options passed to annotateImage are forwarded,
+      // unchanged, to the batch annotation method.
+ var request = {
+ image: {content: new Buffer('bogus==')},
+ features: {type: ['LOGO_DETECTION']},
+ };
+ return vision.annotateImage(request, {foo: 'bar'}).then(r => {
+ var response = r[0];
+
+ // Ensure that we got the slice of the response that we expected.
+ assert.deepEqual(response, {
+ logoAnnotations: [{description: 'Google'}],
+ });
+
+ // Inspect the calls to batchAnnotateImages and ensure they matched
+ // the expected signature.
+ assert(batchAnnotate.callCount === 1);
+ assert(batchAnnotate.calledWith([request], {foo: 'bar'}));
+ });
+ });
+
+ it('fires a callback if provided', done => {
+ var vision = Vision.v1();
+ var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages');
+ batchAnnotate.callsArgWith(2, undefined, {responses: [{
+ logoAnnotations: [{description: 'Google'}],
+ }]});
+
+ // Ensure that the annotateImage method does *not* pass the callback
+ // on to batchAnnotateImages, but rather handles it itself.
+ var request = {
+ image: {content: new Buffer('bogus==')},
+ features: {type: ['LOGO_DETECTION']},
+ };
+ vision.annotateImage(request, function(err, response) {
+ // Establish that we got the expected response.
+ assert(is.undefined(err));
+ assert.deepEqual(response, {
+ logoAnnotations: [{description: 'Google'}],
+ });
+
+ // Inspect the calls to batchAnnotate and ensure that they match
+ // what we expected.
+ assert(batchAnnotate.callCount === 1);
+ assert(batchAnnotate.calledWith([request], undefined));
+ done();
+ });
+ });
+
+    it('rejects the returned promise on error', () => {
+ var vision = Vision.v1();
+ var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages');
+ batchAnnotate.callsArgWith(2, {message: 'Bad things!'});
+
+      // Ensure that an error from batchAnnotateImages is propagated through
+      // the promise returned by annotateImage.
+ var request = {
+ image: {content: new Buffer('bogus==')},
+ features: {type: ['LOGO_DETECTION']},
+ };
+ return vision.annotateImage(request).catch(err => {
+ // Establish that we got the expected response.
+ assert.deepEqual(err, {message: 'Bad things!'});
+
+ // Inspect the calls to batchAnnotate and ensure that they match
+ // what we expected.
+ assert(batchAnnotate.callCount === 1);
+ assert(batchAnnotate.calledWith([request], undefined));
+ });
+ });
+
+    it('requires an image and rejects without one', () => {
+ var vision = Vision.v1();
+ var request = {};
+ return vision.annotateImage(request).then(assert.fail).catch(err => {
+ var expected = 'Attempted to call `annotateImage` with no image.';
+ assert(err.message === expected);
+ });
+ });
+ });
+
+ describe('single-feature methods', () => {
+ it('calls annotateImage with the correct feature', () => {
+ var vision = Vision.v1();
+ var annotate = sandbox.spy(vision, 'annotateImage');
+ var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages');
+ batchAnnotate.callsArgWith(2, undefined, {responses: [{
+ logoAnnotations: [{description: 'Google'}],
+ }]});
+
+      // Ensure that the single-feature method adds the correct feature type
+      // to the request and delegates to annotateImage.
+ var image = {content: new Buffer('bogus==')};
+ return vision.logoDetection(image).then(r => {
+ var response = r[0];
+
+ // Ensure that we got the slice of the response that we expected.
+ assert.deepEqual(response, {
+ logoAnnotations: [{description: 'Google'}],
+ });
+
+        // Inspect the calls to annotateImage and batchAnnotateImages and
+        // ensure both received feature type 3 (LOGO_DETECTION).
+ assert(annotate.callCount === 1);
+ assert(annotate.calledWith({image: image, features: [{type: 3}]}));
+ assert(batchAnnotate.callCount === 1);
+ assert(batchAnnotate.calledWith(
+ [{image: image, features: [{type: 3}]}]
+ ));
+ });
+ });
+ });
+});
diff --git a/packages/vision/test/index.js b/packages/vision/test/index.js
deleted file mode 100644
index 74838291b7c..00000000000
--- a/packages/vision/test/index.js
+++ /dev/null
@@ -1,1868 +0,0 @@
-/**
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-'use strict';
-
-var assert = require('assert');
-var async = require('async');
-var deepStrictEqual = require('deep-strict-equal');
-var extend = require('extend');
-var fs = require('fs');
-var GrpcService = require('@google-cloud/common-grpc').Service;
-var prop = require('propprop');
-var proxyquire = require('proxyquire');
-var tmp = require('tmp');
-var util = require('@google-cloud/common').util;
-
-var promisified = false;
-var fakeUtil = extend({}, util, {
- promisifyAll: function(Class) {
- if (Class.name === 'Vision') {
- promisified = true;
- }
- }
-});
-
-var fakeV1Override;
-function fakeV1() {
- if (fakeV1Override) {
- return fakeV1Override.apply(null, arguments);
- }
-
- return {
- imageAnnotatorClient: util.noop
- };
-}
-
-describe('Vision', function() {
- var IMAGE = './image.jpg';
- var PROJECT_ID = 'project-id';
-
- var Vision;
- var VisionCached;
- var vision;
-
- var OPTIONS = {
- projectId: PROJECT_ID
- };
-
- before(function() {
- Vision = proxyquire('../', {
- '@google-cloud/common': {
- util: fakeUtil
- },
- './v1': fakeV1
- });
-
- VisionCached = extend({}, Vision);
- });
-
- beforeEach(function() {
- fakeV1Override = null;
-
- vision = new Vision(OPTIONS);
-
- extend(Vision, VisionCached);
- });
-
- describe('instantiation', function() {
- it('should promisify all the things', function() {
- assert(promisified);
- });
-
- it('should normalize the arguments', function() {
- var normalizeArguments = fakeUtil.normalizeArguments;
- var normalizeArgumentsCalled = false;
- var fakeOptions = { projectId: PROJECT_ID };
- var fakeContext = {};
-
- fakeUtil.normalizeArguments = function(context, options) {
- normalizeArgumentsCalled = true;
- assert.strictEqual(context, fakeContext);
- assert.strictEqual(options, fakeOptions);
- return options;
- };
-
- Vision.call(fakeContext, fakeOptions);
- assert(normalizeArgumentsCalled);
-
- fakeUtil.normalizeArguments = normalizeArguments;
- });
-
- it('should create a gax api client', function() {
- var expectedVisionClient = {};
-
- fakeV1Override = function(options) {
- var expected = extend({}, OPTIONS, {
- libName: 'gccl',
- libVersion: require('../package.json').version
- });
- assert.deepStrictEqual(options, expected);
-
- return {
- imageAnnotatorClient: function(options) {
- assert.deepStrictEqual(options, expected);
- return expectedVisionClient;
- }
- };
- };
-
- var vision = new Vision(OPTIONS);
-
- assert.deepEqual(vision.api, {
- Vision: expectedVisionClient
- });
- });
- });
-
- describe('constants', function() {
- it('should define constants', function() {
- assert.strictEqual(Vision.likelihood.VERY_UNLIKELY, 0);
- assert.strictEqual(Vision.likelihood.UNLIKELY, 1);
- assert.strictEqual(Vision.likelihood.POSSIBLE, 2);
- assert.strictEqual(Vision.likelihood.LIKELY, 3);
- assert.strictEqual(Vision.likelihood.VERY_LIKELY, 4);
- });
- });
-
- describe('annotate', function() {
- var REQ = {};
-
- it('should arrify request objects', function(done) {
- vision.api.Vision = {
- batchAnnotateImages: function(reqOpts) {
- assert.deepEqual(reqOpts, {
- requests: [REQ]
- });
- done();
- }
- };
-
- vision.annotate(REQ, assert.ifError);
- });
-
- describe('error', function() {
- var error = new Error('Error.');
- var apiResponse = {};
-
- beforeEach(function() {
- vision.api.Vision = {
- batchAnnotateImages: function(reqOpts, callback) {
- callback(error, apiResponse);
- }
- };
- });
-
- it('should execute callback with error & API response', function(done) {
- vision.annotate(REQ, function(err, annotations, resp) {
- assert.strictEqual(err, error);
- assert.strictEqual(annotations, null);
- assert.strictEqual(resp, apiResponse);
- done();
- });
- });
- });
-
- describe('success', function() {
- var apiResponse = {
- responses: []
- };
-
- beforeEach(function() {
- vision.api.Vision = {
- batchAnnotateImages: function(reqOpts, callback) {
- callback(null, apiResponse);
- }
- };
- });
-
- it('should execute callback with annotations & API resp', function(done) {
- vision.annotate(REQ, function(err, annotations, resp) {
- assert.ifError(err);
-
- assert.strictEqual(annotations, apiResponse.responses);
- assert.strictEqual(resp, apiResponse);
-
- done();
- });
- });
- });
- });
-
- describe('detect', function() {
- var TYPES = [
- 'face',
- 'label'
- ];
-
- var IMAGES = [
- {
- content: 'aGk='
- }
- ];
-
- var MULTIPLE_IMAGES = [
- IMAGES[0],
- IMAGES[0]
- ];
-
-
- beforeEach(function() {
- Vision.findImages_ = function(images, callback) {
- callback(null, IMAGES);
- };
- });
-
- it('should find the images', function(done) {
- Vision.findImages_ = function(images) {
- assert.strictEqual(images, IMAGE);
- done();
- };
-
- vision.detect(IMAGE, TYPES, assert.ifError);
- });
-
- it('should return an error from findImages_', function(done) {
- var error = new Error('Error.');
-
- Vision.findImages_ = function(images, callback) {
- assert.strictEqual(images, IMAGE);
- callback(error);
- };
-
- vision.detect(IMAGE, TYPES, function(err) {
- assert.strictEqual(err, error);
- done();
- });
- });
-
- it('should throw an error if a type does not exist', function() {
- var type = 'not-real-type';
-
- assert.throws(function() {
- vision.detect(IMAGE, type, assert.ifError);
- }, /Requested detection feature not found: not-real-type/);
- });
-
- it('should format the correct config', function(done) {
- var typeShortNameToFullName = {
- crop: 'CROP_HINTS',
- crops: 'CROP_HINTS',
-
- doc: 'DOCUMENT_TEXT_DETECTION',
- document: 'DOCUMENT_TEXT_DETECTION',
-
- face: 'FACE_DETECTION',
- faces: 'FACE_DETECTION',
-
- label: 'LABEL_DETECTION',
- labels: 'LABEL_DETECTION',
-
- landmark: 'LANDMARK_DETECTION',
- landmarks: 'LANDMARK_DETECTION',
-
- logo: 'LOGO_DETECTION',
- logos: 'LOGO_DETECTION',
-
- properties: 'IMAGE_PROPERTIES',
-
- safeSearch: 'SAFE_SEARCH_DETECTION',
-
- similar: 'WEB_DETECTION',
-
- text: 'TEXT_DETECTION'
- };
-
- var shortNames = Object.keys(typeShortNameToFullName);
-
- function checkConfig(shortName, callback) {
- vision.annotate = function(config) {
- assert.deepEqual(config, [
- {
- image: IMAGES[0],
- features: [
- {
- type: typeShortNameToFullName[shortName]
- }
- ]
- }
- ]);
-
- callback();
- };
-
- vision.detect(IMAGE, shortName, assert.ifError);
- }
-
- async.each(shortNames, checkConfig, done);
- });
-
- it('should allow setting imageContext', function(done) {
- var imageContext = {
- latLongRect: {
- minLatLng: {
- latitude: 37.420901,
- longitude: -122.081293
- },
- maxLatLng: {
- latitude: 37.423228,
- longitude: -122.086347
- }
- }
- };
-
- vision.annotate = function(config) {
- assert.deepEqual(config, [
- {
- image: IMAGES[0],
- features: [
- {
- type: 'LABEL_DETECTION'
- }
- ],
- imageContext: imageContext
- }
- ]);
-
- done();
- };
-
- vision.detect(IMAGE, {
- types: ['label'],
- imageContext: imageContext
- }, assert.ifError);
- });
-
- it('should allow setting maxResults', function(done) {
- var maxResults = 10;
-
- vision.annotate = function(config) {
- assert.deepEqual(config, [
- {
- image: IMAGES[0],
- features: [
- {
- type: 'FACE_DETECTION',
- maxResults: 10
- }
- ]
- }
- ]);
-
- done();
- };
-
- vision.detect(IMAGE, {
- types: ['face'],
- maxResults: maxResults
- }, assert.ifError);
- });
-
- it('should return empty detections when none were found', function(done) {
- vision.annotate = function(config, callback) {
- callback(null, [
- {},
- {}
- ]);
- };
-
- vision.detect(IMAGE, TYPES, function(err, detections) {
- assert.ifError(err);
- assert.deepEqual(detections, {
- faces: [],
- labels: []
- });
- done();
- });
- });
-
- it('should return the correct detections', function(done) {
- var annotations = [
- {
- cropHintsAnnotation: { anno: true }
- },
- {
- faceAnnotations: { anno: true }
- },
- {
- fullTextAnnotation: { anno: true }
- },
- {
- imagePropertiesAnnotation: { anno: true }
- },
- {
- labelAnnotations: { anno: true }
- },
- {
- landmarkAnnotations: { anno: true }
- },
- {
- logoAnnotations: { anno: true }
- },
- {
- safeSearchAnnotation: { anno: true }
- },
- {
- textAnnotations: { anno: true }
- },
- {
- webDetection: { anno: true }
- }
- ];
-
- var cropHintsAnnotation = {};
- var faceAnnotation = {};
- var fullTextAnnotation = {};
- var imagePropertiesAnnotation = {};
- var entityAnnotation = {};
- var safeSearchAnnotation = {};
- var webDetection = {};
-
- Vision.formatCropHintsAnnotation_ = function() {
- return cropHintsAnnotation;
- };
-
- Vision.formatFaceAnnotation_ = function() {
- return faceAnnotation;
- };
-
- Vision.formatFullTextAnnotation_ = function() {
- return fullTextAnnotation;
- };
-
- Vision.formatImagePropertiesAnnotation_ = function() {
- return imagePropertiesAnnotation;
- };
-
- Vision.formatEntityAnnotation_ = function() {
- return entityAnnotation;
- };
-
- Vision.formatSafeSearchAnnotation_ = function() {
- return safeSearchAnnotation;
- };
-
- Vision.formatWebDetection_ = function() {
- return webDetection;
- };
-
- vision.annotate = function(config, callback) {
- callback(null, annotations);
- };
-
- var expected = {
- crops: cropHintsAnnotation,
- faces: faceAnnotation,
- document: fullTextAnnotation,
- properties: imagePropertiesAnnotation,
- labels: entityAnnotation,
- landmarks: entityAnnotation,
- logos: entityAnnotation,
- safeSearch: safeSearchAnnotation,
- text: entityAnnotation,
- similar: webDetection
- };
-
- var types = Object.keys(expected);
-
- vision.detect(IMAGE, types, function(err, detections) {
- assert.ifError(err);
- assert(deepStrictEqual(detections, expected));
- done();
- });
- });
-
- it('should return an empty array for empty responses', function(done) {
- var annotations = [
- {}, // empty `faceAnnotations`
- {
- imagePropertiesAnnotation: {}
- }
- ];
-
- vision.annotate = function(config, callback) {
- callback(null, annotations);
- };
-
- var expected = {
- faces: [],
- properties: {}
- };
-
- var types = Object.keys(expected);
-
- vision.detect(IMAGE, types, function(err, detections) {
- assert.ifError(err);
-
- assert(deepStrictEqual(detections, expected));
-
- done();
- });
- });
-
- it('should return partial failure errors', function(done) {
- var error1 = { error: true };
- var error2 = { error: true };
-
- var annotations = [
- { error: error1 },
- { error: error2 }
- ];
-
- var types = ['faces', 'properties'];
-
- Vision.formatError_ = function(err) {
- err.formatted = true;
- return err;
- };
-
- vision.annotate = function(config, callback) {
- callback(null, annotations);
- };
-
- vision.detect(IMAGE, types, function(err, detections) {
- assert.strictEqual(err.name, 'PartialFailureError');
-
- assert.deepEqual(err.errors, [
- {
- image: IMAGE,
- errors: [
- extend(error1, {
- type: types[0],
- formatted: true
- }),
- extend(error2, {
- type: types[1],
- formatted: true
- })
- ]
- }
- ]);
-
- assert.deepEqual(detections, {});
-
- done();
- });
- });
-
- it('should return partial failure errors for multi images', function(done) {
- var error1 = { error: true };
- var error2 = { error: true };
- var error3 = { error: true };
- var error4 = { error: true };
-
- var annotations = [
- { error: error1 },
- { error: error2 },
- { error: error3 },
- { error: error4 }
- ];
-
- var images = ['./image.jpg', './image-2.jpg'];
- var types = ['faces', 'properties'];
-
- Vision.findImages_ = function(images, callback) {
- callback(null, MULTIPLE_IMAGES);
- };
-
- Vision.formatError_ = function(err) {
- err.formatted = true;
- return err;
- };
-
- vision.annotate = function(config, callback) {
- callback(null, annotations);
- };
-
- vision.detect(images, types, function(err, detections) {
- assert.strictEqual(err.name, 'PartialFailureError');
-
- assert.deepEqual(err.errors, [
- {
- image: images[0],
- errors: [
- extend(error1, {
- type: types[0],
- formatted: true
- }),
- extend(error2, {
- type: types[1],
- formatted: true
- })
- ]
- },
- {
- image: images[1],
- errors: [
- extend(error3, {
- type: types[0],
- formatted: true
- }),
- extend(error4, {
- type: types[1],
- formatted: true
- })
- ]
- }
- ]);
-
- assert.deepEqual(detections, [{}, {}]);
-
- done();
- });
- });
-
- it('should return only the detection wanted', function(done) {
- vision.annotate = function(config, callback) {
- callback(null, [{}]);
- };
-
- vision.detect(IMAGE, ['face'], function(err, detection) {
- assert.ifError(err);
-
- assert.deepEqual(detection, []);
-
- done();
- });
- });
-
- it('should return the correct detections for multiple img', function(done) {
- var anno = { a: 'b', c: 'd' };
-
- var annotations = [
- // Image 1 annotations:
- {
- faceAnnotations: anno
- },
- {
- imagePropertiesAnnotation: anno
- },
- {
- labelAnnotations: anno
- },
- {
- landmarkAnnotations: anno
- },
- {
- logoAnnotations: anno
- },
- {
- safeSearchAnnotation: anno
- },
- {
- textAnnotations: anno
- },
-
- // Image 2 annotations:
- {
- faceAnnotations: anno
- },
- {
- imagePropertiesAnnotation: anno
- },
- {
- labelAnnotations: anno
- },
- {
- landmarkAnnotations: anno
- },
- {
- logoAnnotations: anno
- },
- {
- safeSearchAnnotation: anno
- },
- {
- textAnnotations: anno
- }
- ];
-
- var faceAnnotation = {};
- var imagePropertiesAnnotation = {};
- var entityAnnotation = {};
- var safeSearchAnnotation = {};
-
- Vision.formatFaceAnnotation_ = function(anno_) {
- assert.strictEqual(anno_, anno);
- return faceAnnotation;
- };
-
- Vision.formatImagePropertiesAnnotation_ = function(anno_) {
- assert.strictEqual(anno_, anno);
- return imagePropertiesAnnotation;
- };
-
- Vision.formatEntityAnnotation_ = function(anno_) {
- assert.strictEqual(anno_, anno);
- return entityAnnotation;
- };
-
- Vision.formatSafeSearchAnnotation_ = function(anno_) {
- assert.strictEqual(anno_, anno);
- return safeSearchAnnotation;
- };
-
- Vision.findImages_ = function(images, callback) {
- callback(null, IMAGES.concat(IMAGES));
- };
-
- vision.annotate = function(config, callback) {
- callback(null, annotations);
- };
-
- var expected = [
- {
- faces: faceAnnotation,
- properties: imagePropertiesAnnotation,
- labels: entityAnnotation,
- landmarks: entityAnnotation,
- logos: entityAnnotation,
- safeSearch: safeSearchAnnotation,
- text: entityAnnotation
- },
- {
- faces: faceAnnotation,
- properties: imagePropertiesAnnotation,
- labels: entityAnnotation,
- landmarks: entityAnnotation,
- logos: entityAnnotation,
- safeSearch: safeSearchAnnotation,
- text: entityAnnotation
- }
- ];
-
- var types = Object.keys(expected[0]);
-
- vision.detect([IMAGE, IMAGE], types, function(err, detections) {
- assert.ifError(err);
- assert(deepStrictEqual(detections, expected));
- done();
- });
- });
-
- it('should return the raw annotation for unknown types', function(done) {
- var anno = { a: 'b', c: 'd' };
-
- var annotations = [
- {
- faceAnnotations: anno
- }
- ];
-
- Vision.formatFaceAnnotation_ = null;
-
- vision.annotate = function(config, callback) {
- callback(null, annotations);
- };
-
- vision.detect(IMAGE, 'faces', function(err, detections) {
- assert.ifError(err);
- assert.strictEqual(detections, anno);
- done();
- });
- });
-
- it('should return an error from annotate()', function(done) {
- var error = new Error('Error.');
- var apiResponse = {};
-
- vision.annotate = function(config, callback) {
- callback(error, null, apiResponse);
- };
-
- vision.detect(IMAGE, TYPES, function(err, annotations, resp) {
- assert.strictEqual(err, error);
- assert.strictEqual(annotations, null);
- assert.strictEqual(resp, apiResponse);
- done();
- });
- });
-
- it('should return the apiResponse from annotate()', function(done) {
- var apiResponse = {
- responses: [
- {
- faceAnnotations: {}
- }
- ]
- };
-
- var originalApiResponse = extend(true, {}, apiResponse);
-
- Vision.formatFaceAnnotation_ = function() {
- return {};
- };
-
- vision.annotate = function(config, callback) {
- callback(null, apiResponse.responses, apiResponse);
- };
-
- vision.detect(IMAGE, TYPES, function(err, annotations, resp) {
- assert.ifError(err);
-
- // assert.strictEqual(resp, apiResponse);
- assert.deepEqual(resp, originalApiResponse);
-
- done();
- });
- });
- });
-
- describe('detectCrops', function() {
- it('should accept a callback only', function(done) {
- vision.detect = testWithoutOptions('crops');
-
- vision.detectCrops(IMAGE, done);
- });
-
- it('should accept options', function(done) {
- var options = {
- a: 'b',
- c: 'd'
- };
-
- vision.detect = testWithOptions('crops', options);
-
- vision.detectCrops(IMAGE, options, done);
- });
- });
-
- describe('detectFaces', function() {
- it('should accept a callback only', function(done) {
- vision.detect = testWithoutOptions('faces');
-
- vision.detectFaces(IMAGE, done);
- });
-
- it('should accept options', function(done) {
- var options = {
- a: 'b',
- c: 'd'
- };
-
- vision.detect = testWithOptions('faces', options);
-
- vision.detectFaces(IMAGE, options, done);
- });
- });
-
- describe('detectLabels', function() {
- it('should accept a callback only', function(done) {
- vision.detect = testWithoutOptions('labels');
-
- vision.detectLabels(IMAGE, done);
- });
-
- it('should accept options', function(done) {
- var options = {
- a: 'b',
- c: 'd'
- };
-
- vision.detect = testWithOptions('labels', options);
-
- vision.detectLabels(IMAGE, options, done);
- });
- });
-
- describe('detectLandmarks', function() {
- it('should accept a callback only', function(done) {
- vision.detect = testWithoutOptions('landmarks');
-
- vision.detectLandmarks(IMAGE, done);
- });
-
- it('should accept options', function(done) {
- var options = {
- a: 'b',
- c: 'd'
- };
-
- vision.detect = testWithOptions('landmarks', options);
-
- vision.detectLandmarks(IMAGE, options, done);
- });
- });
-
- describe('detectLogos', function() {
- it('should accept a callback only', function(done) {
- vision.detect = testWithoutOptions('logos');
-
- vision.detectLogos(IMAGE, done);
- });
-
- it('should accept options', function(done) {
- var options = {
- a: 'b',
- c: 'd'
- };
-
- vision.detect = testWithOptions('logos', options);
-
- vision.detectLogos(IMAGE, options, done);
- });
- });
-
- describe('detectProperties', function() {
- it('should accept a callback only', function(done) {
- vision.detect = testWithoutOptions('properties');
-
- vision.detectProperties(IMAGE, done);
- });
-
- it('should accept options', function(done) {
- var options = {
- a: 'b',
- c: 'd'
- };
-
- vision.detect = testWithOptions('properties', options);
-
- vision.detectProperties(IMAGE, options, done);
- });
- });
-
- describe('detectSafeSearch', function() {
- it('should accept a callback only', function(done) {
- vision.detect = testWithoutOptions('safeSearch');
-
- vision.detectSafeSearch(IMAGE, done);
- });
-
- it('should accept options', function(done) {
- var options = {
- a: 'b',
- c: 'd'
- };
-
- vision.detect = testWithOptions('safeSearch', options);
-
- vision.detectSafeSearch(IMAGE, options, done);
- });
- });
-
- describe('detectSimilar', function() {
- it('should accept a callback only', function(done) {
- vision.detect = testWithoutOptions('similar');
-
- vision.detectSimilar(IMAGE, done);
- });
-
- it('should accept options', function(done) {
- var options = {
- a: 'b',
- c: 'd'
- };
-
- vision.detect = testWithOptions('similar', options);
-
- vision.detectSimilar(IMAGE, options, done);
- });
- });
-
- describe('detectText', function() {
- it('should accept a callback only', function(done) {
- vision.detect = testWithoutOptions('text');
-
- vision.detectText(IMAGE, done);
- });
-
- it('should accept options', function(done) {
- var options = {
- a: 'b',
- c: 'd'
- };
-
- vision.detect = testWithOptions('text', options);
-
- vision.detectText(IMAGE, options, done);
- });
- });
-
- describe('readDocument', function() {
- it('should accept a callback only', function(done) {
- vision.detect = testWithoutOptions('document');
-
- vision.readDocument(IMAGE, done);
- });
-
- it('should accept options', function(done) {
- var options = {
- a: 'b',
- c: 'd'
- };
-
- vision.detect = testWithOptions('document', options);
-
- vision.readDocument(IMAGE, options, done);
- });
- });
-
- describe('findImages_', function() {
- it('should return buffer for snippet sandbox', function(done) {
- global.GCLOUD_SANDBOX_ENV = true;
-
- Vision.findImages_({}, function(err, images) {
- delete global.GCLOUD_SANDBOX_ENV;
- assert.ifError(err);
-
- assert.deepEqual(images, [
- {
- content: new Buffer('')
- }
- ]);
-
- done();
- });
- });
-
- it('should convert a File object', function(done) {
- var file = {
- name: 'file-name',
- bucket: {
- name: 'bucket-name'
- }
- };
-
- var isCustomType = util.isCustomType;
-
- fakeUtil.isCustomType = function(unknown, type) {
- fakeUtil.isCustomType = isCustomType;
- assert.strictEqual(unknown, file);
- assert.strictEqual(type, 'storage/file');
- return true;
- };
-
- Vision.findImages_(file, function(err, images) {
- assert.ifError(err);
-
- assert.deepEqual(images, [
- {
- source: {
- gcsImageUri: 'gs://' + file.bucket.name + '/' + file.name
- }
- }
- ]);
-
- done();
- });
- });
-
- it('should properly format a URL', function(done) {
- var imageUri = 'http://www.google.com/logo.png';
-
- Vision.findImages_(imageUri, function(err, images) {
- assert.ifError(err);
- assert.deepEqual(images, [
- {
- source: {
- imageUri: imageUri
- }
- }
- ]);
- done();
- });
- });
-
- it('should read from a file path', function(done) {
- tmp.setGracefulCleanup();
-
- tmp.file(function tempFileCreated_(err, tmpFilePath) {
- assert.ifError(err);
-
- var contents = 'abcdef';
-
- function writeFile(callback) {
- fs.writeFile(tmpFilePath, contents, callback);
- }
-
- function convertFile(callback) {
- Vision.findImages_(tmpFilePath, callback);
- }
-
- async.waterfall([writeFile, convertFile], function(err, images) {
- assert.ifError(err);
-
- assert.deepEqual(images, [
- {
- content: new Buffer(contents).toString('base64')
- }
- ]);
-
- done();
- });
- });
- });
-
-
- it('should get content from a buffer', function(done) {
- var base64String = 'aGVsbG8gd29ybGQ=';
- var buffer = new Buffer(base64String, 'base64');
-
- Vision.findImages_(buffer, function(err, images) {
- assert.ifError(err);
- assert.deepEqual(images, [
- {
- content: base64String
- }
- ]);
- done();
- });
- });
-
- it('should return an error when file cannot be found', function(done) {
- Vision.findImages_('./not-real-file.png', function(err) {
- assert.strictEqual(err.code, 'ENOENT');
- done();
- });
- });
- });
-
- describe('formatCropHintsAnnotation_', function() {
- var VERTICES = [
- { x: 0, y: 0 },
- { x: 0, y: 0 }
- ];
-
- var CONFIDENCE = 0.3;
-
- var cropHintsAnnotation = {
- cropHints: [
- {
- boundingPoly: {
- vertices: VERTICES
- },
- confidence: CONFIDENCE
- }
- ]
- };
-
- describe('verbose: false', function() {
- var opts = {};
-
- it('should format the annotation', function() {
- var fmtd = Vision.formatCropHintsAnnotation_(cropHintsAnnotation, opts);
-
- assert.deepEqual(fmtd, [
- VERTICES
- ]);
- });
- });
-
- describe('verbose: true', function() {
- var opts = { verbose: true };
-
- it('should format the annotation', function() {
- var fmtd = Vision.formatCropHintsAnnotation_(cropHintsAnnotation, opts);
-
- assert.deepEqual(fmtd, [
- {
- bounds: VERTICES,
- confidence: CONFIDENCE
- }
- ]);
- });
- });
- });
-
- describe('formatEntityAnnotation_', function() {
- var entityAnnotation = {
- description: 'description',
- mid: 'mid',
- score: 0.4,
- boundingPoly: {
- vertices: {}
- },
- confidence: 0.2,
- locations: [
- {
- latLng: []
- }
- ],
- properties: {}
- };
-
- describe('verbose: false', function() {
- it('should just return the description', function() {
- var formatted = Vision.formatEntityAnnotation_(entityAnnotation, {});
-
- assert.strictEqual(formatted, entityAnnotation.description);
- });
- });
-
- describe('verbose: true', function() {
- var opts = {
- verbose: true
- };
-
- it('should format the entity annotation', function() {
- var formatted = Vision.formatEntityAnnotation_(entityAnnotation, opts);
-
- assert.deepEqual(formatted, {
- desc: entityAnnotation.description,
- mid: entityAnnotation.mid,
- score: entityAnnotation.score * 100,
- bounds: entityAnnotation.boundingPoly.vertices,
- confidence: entityAnnotation.confidence * 100,
- locations: entityAnnotation.locations.map(prop('latLng')),
- properties: entityAnnotation.properties
- });
- });
- });
- });
-
- describe('formatError_', function() {
- var error = {
- code: 1,
- message: 'Oh no!',
- details: [
- 'these should be clipped'
- ]
- };
-
- it('should format an error', function() {
- var err = Vision.formatError_(error);
-
- assert.deepEqual(err, {
- code: GrpcService.GRPC_ERROR_CODE_TO_HTTP[1].code,
- message: error.message
- });
- });
- });
-
- describe('formatFaceAnnotation_', function() {
- var faceAnnotation = {
- panAngle: {},
- rollAngle: {},
- tiltAngle: {},
-
- boundingPoly: {
- vertices: {}
- },
- fdBoundingPoly: {
- vertices: {}
- },
-
- landmarkingConfidence: 0.2,
-
- landmarks: [
- {
- type: 'CHIN_GNATHION',
- position: {}
- },
- {
- type: 'CHIN_LEFT_GONION',
- position: {}
- },
- {
- type: 'CHIN_RIGHT_GONION',
- position: {}
- },
- {
- type: 'LEFT_EAR_TRAGION',
- position: {}
- },
- {
- type: 'RIGHT_EAR_TRAGION',
- position: {}
- },
- {
- type: 'LEFT_OF_LEFT_EYEBROW',
- position: {}
- },
- {
- type: 'RIGHT_OF_LEFT_EYEBROW',
- position: {}
- },
- {
- type: 'LEFT_EYEBROW_UPPER_MIDPOINT',
- position: {}
- },
- {
- type: 'LEFT_OF_RIGHT_EYEBROW',
- position: {}
- },
- {
- type: 'RIGHT_OF_RIGHT_EYEBROW',
- position: {}
- },
- {
- type: 'RIGHT_EYEBROW_UPPER_MIDPOINT',
- position: {}
- },
- {
- type: 'LEFT_EYE_BOTTOM_BOUNDARY',
- position: {}
- },
- {
- type: 'LEFT_EYE',
- position: {}
- },
- {
- type: 'LEFT_EYE_LEFT_CORNER',
- position: {}
- },
- {
- type: 'LEFT_EYE_PUPIL',
- position: {}
- },
- {
- type: 'LEFT_EYE_RIGHT_CORNER',
- position: {}
- },
- {
- type: 'LEFT_EYE_TOP_BOUNDARY',
- position: {}
- },
- {
- type: 'RIGHT_EYE_BOTTOM_BOUNDARY',
- position: {}
- },
- {
- type: 'RIGHT_EYE',
- position: {}
- },
- {
- type: 'RIGHT_EYE_LEFT_CORNER',
- position: {}
- },
- {
- type: 'RIGHT_EYE_PUPIL',
- position: {}
- },
- {
- type: 'RIGHT_EYE_RIGHT_CORNER',
- position: {}
- },
- {
- type: 'RIGHT_EYE_TOP_BOUNDARY',
- position: {}
- },
- {
- type: 'FOREHEAD_GLABELLA',
- position: {}
- },
- {
- type: 'LOWER_LIP',
- position: {}
- },
- {
- type: 'UPPER_LIP',
- position: {}
- },
- {
- type: 'MOUTH_CENTER',
- position: {}
- },
- {
- type: 'MOUTH_LEFT',
- position: {}
- },
- {
- type: 'MOUTH_RIGHT',
- position: {}
- },
- {
- type: 'NOSE_BOTTOM_CENTER',
- position: {}
- },
- {
- type: 'NOSE_BOTTOM_LEFT',
- position: {}
- },
- {
- type: 'NOSE_BOTTOM_RIGHT',
- position: {}
- },
- {
- type: 'NOSE_TIP',
- position: {}
- },
- {
- type: 'MIDPOINT_BETWEEN_EYES',
- position: {}
- }
- ],
-
- detectionConfidence: 0.2,
- blurredLikelihood: 'LIKELY',
- underExposedLikelihood: 'LIKELY',
- joyLikelihood: 'LIKELY',
- headwearLikelihood: 'LIKELY',
- angerLikelihood: 'LIKELY',
- sorrowLikelihood: 'LIKELY',
- surpriseLikelihood: 'LIKELY',
-
- nonExistentLikelihood: 'LIKELY'
- };
-
- function findLandmark(type) {
- var landmarks = faceAnnotation.landmarks;
-
- return landmarks.filter(function(landmark) {
- return landmark.type === type;
- })[0].position;
- }
-
- it('should format the annotation', function() {
- var expected = {
- angles: {
- pan: faceAnnotation.panAngle,
- roll: faceAnnotation.rollAngle,
- tilt: faceAnnotation.tiltAngle
- },
-
- bounds: {
- head: faceAnnotation.boundingPoly.vertices,
- face: faceAnnotation.fdBoundingPoly.vertices
- },
-
- features: {
- confidence: faceAnnotation.landmarkingConfidence * 100,
- chin: {
- center: findLandmark('CHIN_GNATHION'),
- left: findLandmark('CHIN_LEFT_GONION'),
- right: findLandmark('CHIN_RIGHT_GONION')
- },
- ears: {
- left: findLandmark('LEFT_EAR_TRAGION'),
- right: findLandmark('RIGHT_EAR_TRAGION'),
- },
- eyebrows: {
- left: {
- left: findLandmark('LEFT_OF_LEFT_EYEBROW'),
- right: findLandmark('RIGHT_OF_LEFT_EYEBROW'),
- top: findLandmark('LEFT_EYEBROW_UPPER_MIDPOINT')
- },
- right: {
- left: findLandmark('LEFT_OF_RIGHT_EYEBROW'),
- right: findLandmark('RIGHT_OF_RIGHT_EYEBROW'),
- top: findLandmark('RIGHT_EYEBROW_UPPER_MIDPOINT')
- }
- },
- eyes: {
- left: {
- bottom: findLandmark('LEFT_EYE_BOTTOM_BOUNDARY'),
- center: findLandmark('LEFT_EYE'),
- left: findLandmark('LEFT_EYE_LEFT_CORNER'),
- pupil: findLandmark('LEFT_EYE_PUPIL'),
- right: findLandmark('LEFT_EYE_RIGHT_CORNER'),
- top: findLandmark('LEFT_EYE_TOP_BOUNDARY')
- },
- right: {
- bottom: findLandmark('RIGHT_EYE_BOTTOM_BOUNDARY'),
- center: findLandmark('RIGHT_EYE'),
- left: findLandmark('RIGHT_EYE_LEFT_CORNER'),
- pupil: findLandmark('RIGHT_EYE_PUPIL'),
- right: findLandmark('RIGHT_EYE_RIGHT_CORNER'),
- top: findLandmark('RIGHT_EYE_TOP_BOUNDARY')
- }
- },
- forehead: findLandmark('FOREHEAD_GLABELLA'),
- lips: {
- bottom: findLandmark('LOWER_LIP'),
- top: findLandmark('UPPER_LIP')
- },
- mouth: {
- center: findLandmark('MOUTH_CENTER'),
- left: findLandmark('MOUTH_LEFT'),
- right: findLandmark('MOUTH_RIGHT')
- },
- nose: {
- bottom: {
- center: findLandmark('NOSE_BOTTOM_CENTER'),
- left: findLandmark('NOSE_BOTTOM_LEFT'),
- right: findLandmark('NOSE_BOTTOM_RIGHT')
- },
- tip: findLandmark('NOSE_TIP'),
- top: findLandmark('MIDPOINT_BETWEEN_EYES')
- }
- },
-
- confidence: faceAnnotation.detectionConfidence * 100,
-
- anger: true,
- angerLikelihood: 3,
- blurred: true,
- blurredLikelihood: 3,
- headwear: true,
- headwearLikelihood: 3,
- joy: true,
- joyLikelihood: 3,
- sorrow: true,
- sorrowLikelihood: 3,
- surprise: true,
- surpriseLikelihood: 3,
- underExposed: true,
- underExposedLikelihood: 3,
-
- // Checks that *any* property that ends in `Likelihood` is shortened.
- nonExistent: true,
- nonExistentLikelihood: 3
- };
-
- var formatted = Vision.formatFaceAnnotation_(faceAnnotation);
-
- assert(deepStrictEqual(formatted, expected));
- });
- });
-
- describe('formatFullTextAnnotation_', function() {
- var BLOCK_TYPE = 'block type';
-
- var LANGUAGE_CODE = 'language code';
-
- var TEXT = 'F';
-
- var VERTICES = [
- { x: 0, y: 0 },
- { x: 0, y: 0 },
- { x: 0, y: 0 },
- { x: 0, y: 0 }
- ];
-
- var fullTextAnnotation = {
- text: 'Full text',
- pages: [
- {
- property: {
- detectedLanguages: [
- {
- languageCode: LANGUAGE_CODE
- }
- ]
- },
- width: 50,
- height: 100,
- blocks: [
- {
- blockType: BLOCK_TYPE,
- boundingBox: {
- vertices: VERTICES
- },
- paragraphs: [
- {
- boundingBox: {
- vertices: VERTICES
- },
- words: [
- {
- boundingBox: {
- vertices: VERTICES
- },
- symbols: [
- {
- boundingBox: {
- vertices: VERTICES
- },
- text: TEXT
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- };
-
- describe('verbose: false', function() {
- var opts = {};
-
- it('should return text property', function() {
- var fmtd = Vision.formatFullTextAnnotation_(fullTextAnnotation, opts);
-
- assert.strictEqual(fmtd, fullTextAnnotation.text);
- });
- });
-
- describe('verbose: true', function() {
- var opts = { verbose: true };
-
- it('should return formatted annotation', function() {
- var fmtd = Vision.formatFullTextAnnotation_(fullTextAnnotation, opts);
-
- assert.deepEqual(fmtd, [
- {
- languages: [
- LANGUAGE_CODE
- ],
- width: 50,
- height: 100,
- blocks: [
- {
- type: BLOCK_TYPE,
- bounds: VERTICES,
- paragraphs: [
- {
- bounds: VERTICES,
- words: [
- {
- bounds: VERTICES,
- symbols: [
- {
- bounds: VERTICES,
- text: TEXT
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]);
- });
-
- it('should not require a bounding block box', function() {
- var annoWithoutBounding = extend(true, {}, fullTextAnnotation);
- delete annoWithoutBounding.pages[0].blocks[0].boundingBox;
-
- var fmtd = Vision.formatFullTextAnnotation_(annoWithoutBounding, opts);
-
- assert.deepEqual(fmtd[0].blocks[0].bounds, []);
- });
- });
- });
-
- describe('formatImagePropertiesAnnotation_', function() {
- var imgAnnotation = {
- dominantColors: {
- colors: [
- {
- color: {
- red: 255,
- green: 255,
- blue: 255
- },
- pixelFraction: 0.8,
- score: 0.2
- }
- ]
- }
- };
-
- describe('verbose: false', function() {
- var opts = {};
-
- it('should format the annotation', function() {
- var fmtd = Vision.formatImagePropertiesAnnotation_(imgAnnotation, opts);
-
- assert.deepEqual(fmtd, {
- colors: ['ffffff']
- });
- });
- });
-
- describe('verbose: true', function() {
- var opts = {
- verbose: true
- };
-
- it('should format the annotation', function() {
- var fmtd = Vision.formatImagePropertiesAnnotation_(imgAnnotation, opts);
-
- assert.deepEqual(fmtd, {
- colors: [
- {
- red: 255,
- green: 255,
- blue: 255,
- hex: 'ffffff',
- coverage: 80,
- score: 20
- }
- ]
- });
- });
- });
- });
-
- describe('formatSafeSearchAnnotation_', function() {
- var safeSearchAnno = {
- adult: 'LIKELY',
- medical: 'LIKELY',
- spoof: 'LIKELY',
- violence: 'LIKELY'
- };
-
- describe('verbose: false', function() {
- var opts = {};
-
- it('should convert values to a boolean', function() {
- var fmtd = Vision.formatSafeSearchAnnotation_(safeSearchAnno, opts);
-
- assert.deepEqual(fmtd, {
- adult: true,
- medical: true,
- spoof: true,
- violence: true
- });
- });
- });
-
- describe('verbose: true', function() {
- var opts = {
- verbose: true
- };
-
- it('should return raw response', function() {
- var fmtd = Vision.formatSafeSearchAnnotation_(safeSearchAnno, opts);
-
- assert.strictEqual(fmtd, safeSearchAnno);
- });
- });
- });
-
- describe('formatWebDetection_', function() {
- var webDetection = {
- webEntities: [
- {
- description: 'description'
- },
- ],
-
- fullMatchingImages: [
- {
- score: 0,
- url: 'http://full-0'
- },
- {
- score: 1,
- url: 'http://full-1'
- }
- ],
-
- partialMatchingImages: [
- {
- score: 0,
- url: 'http://partial-0'
- },
- {
- score: 1,
- url: 'http://partial-1'
- }
- ],
-
- pagesWithMatchingImages: [
- {
- score: 0,
- url: 'http://page-0'
- },
- {
- score: 1,
- url: 'http://page-1'
- }
- ]
- };
-
- describe('verbose: false', function() {
- var opts = {};
-
- it('should return sorted & combined image urls', function() {
- var fmtd = Vision.formatWebDetection_(webDetection, opts);
-
- assert.deepEqual(fmtd, [
- 'http://full-1',
- 'http://full-0',
- 'http://partial-1',
- 'http://partial-0'
- ]);
- });
- });
-
- describe('verbose: true', function() {
- var opts = {
- verbose: true
- };
-
- it('should return entities, pages & individual, sorted urls', function() {
- var fmtd = Vision.formatWebDetection_(webDetection, opts);
-
- assert.deepEqual(fmtd, {
- entities: webDetection.webEntities.map(prop('description')),
- fullMatches: [
- 'http://full-1',
- 'http://full-0'
- ],
- partialMatches: [
- 'http://partial-1',
- 'http://partial-0'
- ],
- pages: [
- 'http://page-1',
- 'http://page-0'
- ]
- });
- });
- });
- });
-
- describe('gteLikelihood_', function() {
- it('should return booleans', function() {
- var baseLikelihood = Vision.likelihood.LIKELY;
-
- assert.strictEqual(
- Vision.gteLikelihood_(baseLikelihood, 'VERY_UNLIKELY'),
- false
- );
-
- assert.strictEqual(
- Vision.gteLikelihood_(baseLikelihood, 'UNLIKELY'),
- false
- );
-
- assert.strictEqual(
- Vision.gteLikelihood_(baseLikelihood, 'POSSIBLE'),
- false
- );
-
- assert.strictEqual(
- Vision.gteLikelihood_(baseLikelihood, 'LIKELY'),
- true
- );
-
- assert.strictEqual(
- Vision.gteLikelihood_(baseLikelihood, 'VERY_LIKELY'),
- true
- );
- });
- });
-
- function testWithoutOptions(type) {
- return function(images, options, callback) {
- assert.strictEqual(images, IMAGE);
- assert.deepEqual(options, {
- types: [type]
- });
- callback(); // done()
- };
- }
-
- function testWithOptions(type, options) {
- return function(images, options_, callback) {
- assert.strictEqual(images, IMAGE);
- assert.notStrictEqual(options_, options);
- assert.deepEqual(options_, extend({}, options, {
- types: [type]
- }));
- callback(); // done()
- };
- }
-});
diff --git a/packages/vision/test/index.test.js b/packages/vision/test/index.test.js
new file mode 100644
index 00000000000..7da32e2d69b
--- /dev/null
+++ b/packages/vision/test/index.test.js
@@ -0,0 +1,42 @@
+/**
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+var assert = require('assert');
+
+var Vision = require('../');
+
+
+describe('Vision', () => {
+ describe('v1', () => {
+ it('returns a v1 GAPIC augmented with helpers', () => {
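+ // Construct a v1 client; per the assertions below, the package index is
+ // expected to layer the hand-written helpers on top of the generated
+ // GAPIC surface.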
+ var vision = Vision.v1();
+
+ // Assert that the GAPIC v1 methods are present on the object.
+ assert(vision.batchAnnotateImages instanceof Function);
+
+ // Assert that the manual single-image helper method is present
+ // on the object.
+ assert(vision.annotateImage instanceof Function);
+
+ // Assert that some of the expected single-feature helper methods
+ // are present on the object.
+ assert(vision.faceDetection instanceof Function);
+ assert(vision.logoDetection instanceof Function);
+ });
+ });
+});
diff --git a/scripts/docs/config.js b/scripts/docs/config.js
index 53a9753c821..fbb4974401e 100644
--- a/scripts/docs/config.js
+++ b/scripts/docs/config.js
@@ -22,6 +22,7 @@ module.exports = {
TYPES_DICT: 'types.json',
TOC: 'toc.json',
IGNORE: [
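+ // Exclude the hand-written GAPIC helper files from reference doc generation.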
+ '**/helpers.js',
'common',
'common-grpc',
'bigtable/src/mutation.js',
diff --git a/scripts/helpers.js b/scripts/helpers.js
index ba1cb63a9c2..c78a407fb0e 100644
--- a/scripts/helpers.js
+++ b/scripts/helpers.js
@@ -243,6 +243,7 @@ Module.prototype.runSystemTests = function() {
'error-reporting',
'google-cloud',
'monitoring',
+ 'speech',
'video-intelligence'
];
diff --git a/test/docs.js b/test/docs.js
index 295e8ca3aec..2f2428dfe10 100644
--- a/test/docs.js
+++ b/test/docs.js
@@ -221,6 +221,9 @@ modules.forEach(function(mod) {
it('should run ' + name + ' examples without errors', function() {
jshint(snippet, {
+ // Allow ES6 syntax
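+ // (documentation snippets may use ES6 features such as const and arrow functions)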
+ esversion: 6,
+
// in several snippets we give an example as to how to access
// a property (like metadata) without doing anything with it
// e.g. `list[0].metadata`