diff --git a/speech/package.json b/speech/package.json
index 63b3f55254..4ce70045cb 100644
--- a/speech/package.json
+++ b/speech/package.json
@@ -5,17 +5,13 @@
   "license": "Apache Version 2.0",
   "author": "Google Inc.",
   "scripts": {
-    "test": "mocha -R spec -t 10000 --require intelli-espower-loader ../test/_setup.js test/*.test.js",
-    "system-test": "mocha -R spec -t 10000 --require intelli-espower-loader ../system-test/_setup.js system-test/*.test.js"
+    "test": "cd ..; npm run st -- speech/system-test/*.test.js"
   },
   "dependencies": {
     "@google-cloud/speech": "^0.4.0",
     "node-record-lpcm16": "^0.1.4",
     "yargs": "^6.4.0"
   },
-  "devDependencies": {
-    "mocha": "^3.1.2"
-  },
   "engines": {
     "node": ">=4.3.2"
   }
diff --git a/speech/quickstart.js b/speech/quickstart.js
index fa28527ca3..bc2cde2bd4 100644
--- a/speech/quickstart.js
+++ b/speech/quickstart.js
@@ -37,12 +37,9 @@ const options = {
 };

 // Detects speech in the audio file
-speechClient.recognize(fileName, options, (err, result) => {
-  if (err) {
-    console.error(err);
-    return;
-  }
-
-  console.log(`Transcription: ${result}`);
-});
+speechClient.recognize(fileName, options)
+  .then((results) => {
+    const transcription = results[0];
+    console.log(`Transcription: ${transcription}`);
+  });
 // [END speech_quickstart]
diff --git a/speech/recognize.js b/speech/recognize.js
index be5beb4129..45112d48b9 100644
--- a/speech/recognize.js
+++ b/speech/recognize.js
@@ -23,54 +23,66 @@

 'use strict';

-const fs = require('fs');
-const record = require('node-record-lpcm16');
-const speech = require('@google-cloud/speech')();
+const Speech = require('@google-cloud/speech');

 // [START speech_sync_recognize]
-function syncRecognize (filename, callback) {
-  // Detect speech in the audio file, e.g. "./resources/audio.raw"
-  speech.recognize(filename, {
+function syncRecognize (filename) {
+  // Instantiates a client
+  const speech = Speech();
+
+  const config = {
+    // Configure these settings based on the audio you're transcribing
     encoding: 'LINEAR16',
     sampleRate: 16000
-  }, (err, results) => {
-    if (err) {
-      callback(err);
-      return;
-    }
+  };

-    console.log('Results:', results);
-    callback();
-  });
+  // Detects speech in the audio file, e.g. "./resources/audio.raw"
+  return speech.recognize(filename, config)
+    .then((results) => {
+      const transcription = results[0];
+      console.log(`Transcription: ${transcription}`);
+      return transcription;
+    });
 }
 // [END speech_sync_recognize]

 // [START speech_async_recognize]
-function asyncRecognize (filename, callback) {
-  // Detect speech in the audio file, e.g. "./resources/audio.raw"
-  speech.startRecognition(filename, {
+function asyncRecognize (filename) {
+  // Instantiates a client
+  const speech = Speech();
+
+  const config = {
+    // Configure these settings based on the audio you're transcribing
     encoding: 'LINEAR16',
     sampleRate: 16000
-  }, (err, operation) => {
-    if (err) {
-      callback(err);
-      return;
-    }
+  };

-    operation
-      .on('error', callback)
-      .on('complete', (results) => {
-        console.log('Results:', results);
-        callback();
-      });
-  });
+  // Detects speech in the audio file, e.g. "./resources/audio.raw"
+  // This creates a recognition job that you can wait for now, or get its result
+  // later.
+  return speech.startRecognition(filename, config)
+    .then((results) => {
+      const operation = results[0];
+      // Get a Promise representation of the final result of the job
+      return operation.promise();
+    })
+    .then((transcription) => {
+      console.log(`Transcription: ${transcription}`);
+      return transcription;
+    });
 }
 // [END speech_async_recognize]

 // [START speech_streaming_recognize]
+const fs = require('fs');
+
 function streamingRecognize (filename, callback) {
+  // Instantiates a client
+  const speech = Speech();
+
   const options = {
     config: {
+      // Configure these settings based on the audio you're transcribing
       encoding: 'LINEAR16',
       sampleRate: 16000
     }
@@ -90,9 +102,15 @@ function streamingRecognize (filename, callback) {
 // [END speech_streaming_recognize]

 // [START speech_streaming_mic_recognize]
-function streamingMicRecognize (filename) {
+const record = require('node-record-lpcm16');
+
+function streamingMicRecognize () {
+  // Instantiates a client
+  const speech = Speech();
+
   const options = {
     config: {
+      // Configure these settings based on the audio you're transcribing
       encoding: 'LINEAR16',
       sampleRate: 16000
     }
@@ -110,43 +128,39 @@ function streamingMicRecognize (filename) {
 }
 // [END speech_streaming_mic_recognize]

-// The command-line program
-var cli = require('yargs');
-var utils = require('../utils');
-
-var program = module.exports = {
-  syncRecognize: syncRecognize,
-  asyncRecognize: asyncRecognize,
-  streamingRecognize: streamingRecognize,
-  streamingMicRecognize: streamingMicRecognize,
-  main: function (args) {
-    // Run the command-line program
-    cli.help().strict().parse(args).argv;
-  }
-};
-
-cli
+require(`yargs`)
   .demand(1)
-  .command('sync <filename>', 'Detects speech in an audio file.', {}, function (options) {
-    program.syncRecognize(options.filename, utils.makeHandler(false));
-  })
-  .command('async <filename>', 'Creates a job to detect speech in an audio file, and waits for the job to complete.', {}, function (options) {
-    program.asyncRecognize(options.filename, utils.makeHandler(false));
-  })
-  .command('stream <filename>', 'Detects speech in an audio file by streaming it to the Speech API.', {}, function (options) {
-    program.streamingRecognize(options.filename, utils.makeHandler(false));
-  })
-  .command('listen', 'Detects speech in a microphone input stream.', {}, function () {
-    program.streamingMicRecognize();
-  })
-  .example('node $0 sync ./resources/audio.raw', 'Detects speech in "./resources/audio.raw".')
-  .example('node $0 async ./resources/audio.raw', 'Creates a job to detect speech in "./resources/audio.raw", and waits for the job to complete.')
-  .example('node $0 stream ./resources/audio.raw', 'Detects speech in "./resources/audio.raw" by streaming it to the Speech API.')
-  .example('node $0 listen', 'Detects speech in a microphone input stream.')
+  .command(
+    `sync <filename>`,
+    `Detects speech in an audio file.`,
+    {},
+    (opts) => syncRecognize(opts.filename)
+  )
+  .command(
+    `async <filename>`,
+    `Creates a job to detect speech in an audio file, and waits for the job to complete.`,
+    {},
+    (opts) => asyncRecognize(opts.filename)
+  )
+  .command(
+    `stream <filename>`,
+    `Detects speech in an audio file by streaming it to the Speech API.`,
+    {},
+    (opts) => streamingRecognize(opts.filename, () => {})
+  )
+  .command(
+    `listen`,
+    `Detects speech in a microphone input stream.`,
+    {},
+    streamingMicRecognize
+  )
+  .example(`node $0 sync ./resources/audio.raw`)
+  .example(`node $0 async ./resources/audio.raw`)
+  .example(`node $0 stream ./resources/audio.raw`)
+  .example(`node $0 listen`)
   .wrap(120)
   .recommendCommands()
-  .epilogue('For more information, see https://cloud.google.com/speech/docs');
-
-if (module === require.main) {
-  program.main(process.argv.slice(2));
-}
+  .epilogue(`For more information, see https://cloud.google.com/speech/docs`)
+  .help()
+  .strict()
+  .argv;
diff --git a/speech/system-test/quickstart.test.js b/speech/system-test/quickstart.test.js
index 1dbc4b5af1..1b24b541f4 100644
--- a/speech/system-test/quickstart.test.js
+++ b/speech/system-test/quickstart.test.js
@@ -26,33 +26,33 @@ const config = {
 };

 describe(`speech:quickstart`, () => {
-  let speechMock, SpeechMock;
-
   it(`should detect speech`, (done) => {
     const expectedFileName = `./resources/audio.raw`;
     const expectedText = `how old is the Brooklyn Bridge`;

-    speechMock = {
-      recognize: (_fileName, _config, _callback) => {
+    const speechMock = {
+      recognize: (_fileName, _config) => {
         assert.equal(_fileName, expectedFileName);
         assert.deepEqual(_config, config);
-        assert.equal(typeof _callback, `function`);
-
-        speech.recognize(fileName, config, (err, transcription, apiResponse) => {
-          _callback(err, transcription, apiResponse);
-          assert.ifError(err);
-          assert.equal(transcription, expectedText);
-          assert.notEqual(apiResponse, undefined);
-          assert.equal(console.log.calledOnce, true);
-          assert.deepEqual(console.log.firstCall.args, [`Transcription: ${expectedText}`]);
-          done();
-        });
+
+        return speech.recognize(fileName, config)
+          .then((results) => {
+            const transcription = results[0];
+            assert.equal(transcription, expectedText);
+
+            setTimeout(() => {
+              assert.equal(console.log.callCount, 1);
+              assert.deepEqual(console.log.getCall(0).args, [`Transcription: ${expectedText}`]);
+              done();
+            }, 200);
+
+            return results;
+          });
       }
     };
-    SpeechMock = sinon.stub().returns(speechMock);

     proxyquire(`../quickstart`, {
-      '@google-cloud/speech': SpeechMock
+      '@google-cloud/speech': sinon.stub().returns(speechMock)
     });
   });
 });
diff --git a/speech/system-test/recognize.test.js b/speech/system-test/recognize.test.js
index 8249321d2c..2653725fd3 100644
--- a/speech/system-test/recognize.test.js
+++ b/speech/system-test/recognize.test.js
@@ -25,14 +25,14 @@ const text = `how old is the Brooklyn Bridge`;

 describe(`speech:recognize`, () => {
   it(`should run sync recognize`, () => {
-    assert.equal(run(`${cmd} sync ${filename}`, cwd), `Results: ${text}`);
+    assert.equal(run(`${cmd} sync ${filename}`, cwd), `Transcription: ${text}`);
   });

   it(`should run async recognize`, () => {
-    assert.equal(run(`${cmd} async ${filename}`, cwd), `Results: ${text}`);
+    assert.equal(run(`${cmd} async ${filename}`, cwd), `Transcription: ${text}`);
   });

   it(`should run streaming recognize`, () => {
-    assert.notEqual(run(`${cmd} stream ${filename}`, cwd).indexOf(text), -1);
+    assert.equal(run(`${cmd} stream ${filename}`, cwd).includes(text), true);
   });
 });
diff --git a/speech/test/quickstart.test.js b/speech/test/quickstart.test.js
deleted file mode 100644
index 807e23e507..0000000000
--- a/speech/test/quickstart.test.js
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2016, Google, Inc.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-'use strict';
-
-const proxyquire = require(`proxyquire`).noCallThru();
-
-const config = {
-  encoding: 'LINEAR16',
-  sampleRate: 16000
-};
-
-describe(`speech:quickstart`, () => {
-  let speechMock, SpeechMock;
-  const error = new Error(`error`);
-  const fileName = `./resources/audio.raw`;
-
-  before(() => {
-    speechMock = {
-      recognize: sinon.stub().yields(error)
-    };
-    SpeechMock = sinon.stub().returns(speechMock);
-  });
-
-  it(`should handle error`, () => {
-    proxyquire(`../quickstart`, {
-      '@google-cloud/speech': SpeechMock
-    });
-
-    assert.equal(SpeechMock.calledOnce, true);
-    assert.deepEqual(SpeechMock.firstCall.args, [{ projectId: 'YOUR_PROJECT_ID' }]);
-    assert.equal(speechMock.recognize.calledOnce, true);
-    assert.deepEqual(speechMock.recognize.firstCall.args.slice(0, -1), [fileName, config]);
-    assert.equal(console.error.calledOnce, true);
-    assert.deepEqual(console.error.firstCall.args, [error]);
-  });
-});
diff --git a/speech/test/recognize.test.js b/speech/test/recognize.test.js
deleted file mode 100644
index 89747c0245..0000000000
--- a/speech/test/recognize.test.js
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Copyright 2016, Google, Inc.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-'use strict';
-
-const proxyquire = require(`proxyquire`).noCallThru();
-
-describe(`speech:recognize`, () => {
-  it(`should handle errors`, () => {
-    const filename = `audio.raw`;
-    const error = new Error(`error`);
-    const callback = sinon.spy();
-    const speechMock = {
-      recognize: sinon.stub().yields(error),
-      startRecognition: sinon.stub().yields(error)
-    };
-    const SpeechMock = sinon.stub().returns(speechMock);
-    const program = proxyquire(`../recognize`, {
-      '@google-cloud/speech': SpeechMock
-    });
-
-    program.syncRecognize(filename, callback);
-    program.asyncRecognize(filename, callback);
-
-    assert.equal(callback.callCount, 2);
-    assert.equal(callback.alwaysCalledWithExactly(error), true);
-  });
-});
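
Usage sketch (illustrative, not part of the diff): after this change the samples return promises instead of taking callbacks, so a caller can consume the transcription directly. A minimal example of the new pattern, assuming the same LINEAR16/16000 config and the sample's ./resources/audio.raw file; the .catch handler is added here for completeness and is not part of the samples above.

    // Mirrors the promise chain used in quickstart.js and recognize.js above.
    const Speech = require('@google-cloud/speech');
    const speech = Speech();

    speech.recognize('./resources/audio.raw', { encoding: 'LINEAR16', sampleRate: 16000 })
      .then((results) => console.log(`Transcription: ${results[0]}`))
      .catch((err) => console.error('ERROR:', err));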