This repository has been archived by the owner on Feb 18, 2024. It is now read-only.

Commit

Enable prefer-const in the eslint config (#181)
JustinBeckwith authored and alexander-fenster committed Sep 20, 2018
1 parent 1c3163d commit 8c8a78f
Showing 10 changed files with 102 additions and 100 deletions.
1 change: 1 addition & 0 deletions .eslintrc.yml
@@ -12,3 +12,4 @@ rules:
eqeqeq: error
no-warning-comments: warn
no-var: error
prefer-const: error
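
For reference, prefer-const reports any let binding that is never reassigned; the rule is auto-fixable, so eslint --fix can perform let-to-const conversions like the ones in the rest of this diff. A minimal illustration (not code from this repository):

// Reported by prefer-const: 'greeting' is never reassigned, so it should be const.
let greeting = 'hello';
console.log(greeting);

// Not reported: 'attempts' is reassigned later, so let is required.
let attempts = 0;
attempts += 1;
console.log(attempts);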
3 changes: 2 additions & 1 deletion smoke-test/speech_smoke_test.js
@@ -38,7 +38,8 @@ describe('SpeechSmokeTest', () => {
config: config,
audio: audio,
};
client.recognize(request)
client
.recognize(request)
.then(responses => {
const response = responses[0];
console.log(response);
18 changes: 9 additions & 9 deletions src/helpers.js
@@ -16,17 +16,17 @@

'use strict';

let common = require('@google-cloud/common');
let pumpify = require('pumpify');
let streamEvents = require('stream-events');
let through = require('through2');
const common = require('@google-cloud/common');
const pumpify = require('pumpify');
const streamEvents = require('stream-events');
const through = require('through2');

/*!
* Return a dictionary-like object with helpers to augment the Speech
* GAPIC.
*/
module.exports = () => {
let methods = {};
const methods = {};

/**
* Performs bidirectional streaming speech recognition: receive results while
@@ -69,9 +69,9 @@ module.exports = () => {
}

// Format the audio content as input request for pipeline
let recognizeStream = streamEvents(pumpify.obj());
const recognizeStream = streamEvents(pumpify.obj());

let requestStream = this._innerApiCalls
const requestStream = this._innerApiCalls
.streamingRecognize(options)
.on('error', err => {
recognizeStream.destroy(err);
@@ -87,7 +87,7 @@ module.exports = () => {
// config) is delayed until we get the first burst of data.
recognizeStream.once('writing', () => {
// The first message should contain the streaming config.
let firstMessage = true;
const firstMessage = true;

// Set up appropriate piping between the stream returned by
// the underlying API method and the one that we return.
@@ -96,7 +96,7 @@
// This entails that the user sends raw audio; it is wrapped in
// the appropriate request structure.
through.obj((obj, _, next) => {
let payload = {};
const payload = {};
if (firstMessage && config !== undefined) {
// Write the initial configuration to the stream.
payload.streamingConfig = config;
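
For context, the streamingRecognize helper above returns a duplex stream that accepts raw audio and emits recognition results. A rough usage sketch, assuming a local FLAC file and default credentials (illustration only, not part of this commit):

const fs = require('fs');
const speech = require('@google-cloud/speech');

const client = new speech.v1.SpeechClient();
const streamingConfig = {
  config: {encoding: 'FLAC', sampleRateHertz: 44100, languageCode: 'en-US'},
  interimResults: false,
};

// The helper wraps the first write in a streaming config message and each
// audio chunk in a request object before piping it to the API stream.
fs.createReadStream('audio.flac')
  .pipe(client.streamingRecognize(streamingConfig))
  .on('error', console.error)
  .on('data', response => console.log(response.results));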
4 changes: 2 additions & 2 deletions src/index.js
@@ -48,8 +48,8 @@ const gapic = Object.freeze({
});

// Augment the SpeechClient objects with the helpers.
for (let gapicVersion of Object.keys(gapic)) {
let clientProto = gapic[gapicVersion].SpeechClient.prototype;
for (const gapicVersion of Object.keys(gapic)) {
const clientProto = gapic[gapicVersion].SpeechClient.prototype;
Object.assign(clientProto, helpers());
}

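The loop above copies every helper returned by helpers() onto each generated client prototype, so both the v1 and v1p1beta1 clients gain the streaming helper. A self-contained sketch of that augmentation pattern, using hypothetical stand-in classes rather than the real generated clients:

// Hypothetical helpers() factory, mirroring the shape used in src/helpers.js.
function helpers() {
  return {
    streamingRecognize() {
      return `duplex stream created by ${this.constructor.name}`;
    },
  };
}

class SpeechClientV1 {}
class SpeechClientV1p1beta1 {}
const gapic = Object.freeze({
  v1: {SpeechClient: SpeechClientV1},
  v1p1beta1: {SpeechClient: SpeechClientV1p1beta1},
});

// Same augmentation loop as in src/index.js.
for (const gapicVersion of Object.keys(gapic)) {
  const clientProto = gapic[gapicVersion].SpeechClient.prototype;
  Object.assign(clientProto, helpers());
}

console.log(new SpeechClientV1().streamingRecognize()); // helper is now a prototype method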
2 changes: 1 addition & 1 deletion src/v1/speech_client.js
@@ -166,7 +166,7 @@ class SpeechClient {
'longRunningRecognize',
'streamingRecognize',
];
for (let methodName of speechStubMethods) {
for (const methodName of speechStubMethods) {
this._innerApiCalls[methodName] = gax.createApiCall(
speechStub.then(
stub =>
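
The loop above wraps each gRPC stub method with gax.createApiCall so retries, timeouts, and promise handling are applied uniformly. A library-free sketch of that wrapping idea, with hypothetical stand-ins rather than the actual google-gax API:

// Hypothetical stand-in for gax.createApiCall: resolves the stub lazily and
// forwards the request to the bound stub method.
function createApiCall(stubMethodPromise) {
  return async request => {
    const stubMethod = await stubMethodPromise;
    return stubMethod(request);
  };
}

// Fake stub standing in for the generated SpeechService gRPC stub.
const speechStub = Promise.resolve({
  recognize: request => ({results: [], request}),
  longRunningRecognize: request => ({name: 'operations/fake', request}),
  streamingRecognize: request => ({request}),
});

const speechStubMethods = ['recognize', 'longRunningRecognize', 'streamingRecognize'];
const innerApiCalls = {};
for (const methodName of speechStubMethods) {
  innerApiCalls[methodName] = createApiCall(
    speechStub.then(stub => stub[methodName].bind(stub))
  );
}

innerApiCalls.recognize({audio: {uri: 'gs://bucket/file.flac'}}).then(console.log);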
2 changes: 1 addition & 1 deletion src/v1p1beta1/speech_client.js
@@ -166,7 +166,7 @@ class SpeechClient {
'longRunningRecognize',
'streamingRecognize',
];
for (let methodName of speechStubMethods) {
for (const methodName of speechStubMethods) {
this._innerApiCalls[methodName] = gax.createApiCall(
speechStub.then(
stub =>
18 changes: 9 additions & 9 deletions system-test/speech_smoke_test.js
@@ -18,30 +18,30 @@ describe('SpeechSmokeTest', () => {
it('successfully makes a call to the service', done => {
const speech = require('../src');

let client = new speech.v1.SpeechClient({
const client = new speech.v1.SpeechClient({
// optional auth parameters.
});

let languageCode = 'en-US';
let sampleRateHertz = 44100;
let encoding = 'FLAC';
let config = {
const languageCode = 'en-US';
const sampleRateHertz = 44100;
const encoding = 'FLAC';
const config = {
languageCode: languageCode,
sampleRateHertz: sampleRateHertz,
encoding: encoding,
};
let uri = 'gs://gapic-toolkit/hello.flac';
let audio = {
const uri = 'gs://gapic-toolkit/hello.flac';
const audio = {
uri: uri,
};
let request = {
const request = {
config: config,
audio: audio,
};
client
.recognize(request)
.then(responses => {
let response = responses[0];
const response = responses[0];
console.log(response);
})
.then(done)
18 changes: 9 additions & 9 deletions system-test/speech_smoke_test_v1p1beta1.js
@@ -18,30 +18,30 @@ describe('SpeechSmokeTest v1p1beta1', () => {
it('successfully makes a call to the service', done => {
const speech = require('../src');

let client = new speech.v1p1beta1.SpeechClient({
const client = new speech.v1p1beta1.SpeechClient({
// optional auth parameters.
});

let languageCode = 'en-US';
let sampleRateHertz = 44100;
let encoding = 'FLAC';
let config = {
const languageCode = 'en-US';
const sampleRateHertz = 44100;
const encoding = 'FLAC';
const config = {
languageCode: languageCode,
sampleRateHertz: sampleRateHertz,
encoding: encoding,
};
let uri = 'gs://gapic-toolkit/hello.flac';
let audio = {
const uri = 'gs://gapic-toolkit/hello.flac';
const audio = {
uri: uri,
};
let request = {
const request = {
config: config,
audio: audio,
};
client
.recognize(request)
.then(responses => {
let response = responses[0];
const response = responses[0];
console.log(response);
})
.then(done)
80 changes: 40 additions & 40 deletions test/gapic-v1.test.js
@@ -18,38 +18,38 @@ const assert = require('assert');

const speechModule = require('../src');

let FAKE_STATUS_CODE = 1;
let error = new Error();
const FAKE_STATUS_CODE = 1;
const error = new Error();
error.code = FAKE_STATUS_CODE;

describe('SpeechClient', () => {
describe('recognize', () => {
it('invokes recognize without error', done => {
let client = new speechModule.v1.SpeechClient({
const client = new speechModule.v1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});

// Mock request
let encoding = 'FLAC';
let sampleRateHertz = 44100;
let languageCode = 'en-US';
let config = {
const encoding = 'FLAC';
const sampleRateHertz = 44100;
const languageCode = 'en-US';
const config = {
encoding: encoding,
sampleRateHertz: sampleRateHertz,
languageCode: languageCode,
};
let uri = 'gs://bucket_name/file_name.flac';
let audio = {
const uri = 'gs://bucket_name/file_name.flac';
const audio = {
uri: uri,
};
let request = {
const request = {
config: config,
audio: audio,
};

// Mock response
let expectedResponse = {};
const expectedResponse = {};

// Mock Grpc layer
client._innerApiCalls.recognize = mockSimpleGrpcMethod(
@@ -65,25 +65,25 @@ describe('SpeechClient', () => {
});

it('invokes recognize with error', done => {
let client = new speechModule.v1.SpeechClient({
const client = new speechModule.v1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});

// Mock request
let encoding = 'FLAC';
let sampleRateHertz = 44100;
let languageCode = 'en-US';
let config = {
const encoding = 'FLAC';
const sampleRateHertz = 44100;
const languageCode = 'en-US';
const config = {
encoding: encoding,
sampleRateHertz: sampleRateHertz,
languageCode: languageCode,
};
let uri = 'gs://bucket_name/file_name.flac';
let audio = {
const uri = 'gs://bucket_name/file_name.flac';
const audio = {
uri: uri,
};
let request = {
const request = {
config: config,
audio: audio,
};
@@ -106,31 +106,31 @@

describe('longRunningRecognize', function() {
it('invokes longRunningRecognize without error', done => {
let client = new speechModule.v1.SpeechClient({
const client = new speechModule.v1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});

// Mock request
let encoding = 'FLAC';
let sampleRateHertz = 44100;
let languageCode = 'en-US';
let config = {
const encoding = 'FLAC';
const sampleRateHertz = 44100;
const languageCode = 'en-US';
const config = {
encoding: encoding,
sampleRateHertz: sampleRateHertz,
languageCode: languageCode,
};
let uri = 'gs://bucket_name/file_name.flac';
let audio = {
const uri = 'gs://bucket_name/file_name.flac';
const audio = {
uri: uri,
};
let request = {
const request = {
config: config,
audio: audio,
};

// Mock response
let expectedResponse = {};
const expectedResponse = {};

// Mock Grpc layer
client._innerApiCalls.longRunningRecognize = mockLongRunningGrpcMethod(
@@ -141,7 +141,7 @@ describe('SpeechClient', () => {
client
.longRunningRecognize(request)
.then(responses => {
let operation = responses[0];
const operation = responses[0];
return operation.promise();
})
.then(responses => {
@@ -154,25 +154,25 @@ });
});

it('invokes longRunningRecognize with error', done => {
let client = new speechModule.v1.SpeechClient({
const client = new speechModule.v1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});

// Mock request
let encoding = 'FLAC';
let sampleRateHertz = 44100;
let languageCode = 'en-US';
let config = {
const encoding = 'FLAC';
const sampleRateHertz = 44100;
const languageCode = 'en-US';
const config = {
encoding: encoding,
sampleRateHertz: sampleRateHertz,
languageCode: languageCode,
};
let uri = 'gs://bucket_name/file_name.flac';
let audio = {
const uri = 'gs://bucket_name/file_name.flac';
const audio = {
uri: uri,
};
let request = {
const request = {
config: config,
audio: audio,
};
@@ -187,7 +187,7 @@ client
client
.longRunningRecognize(request)
.then(responses => {
let operation = responses[0];
const operation = responses[0];
return operation.promise();
})
.then(() => {
Expand All @@ -201,7 +201,7 @@ describe('SpeechClient', () => {
});

it('has longrunning decoder functions', () => {
let client = new speechModule.v1.SpeechClient({
const client = new speechModule.v1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
@@ -233,7 +233,7 @@ function mockSimpleGrpcMethod(expectedRequest, response, error) {
function mockLongRunningGrpcMethod(expectedRequest, response, error) {
return request => {
assert.deepStrictEqual(request, expectedRequest);
let mockOperation = {
const mockOperation = {
promise: function() {
return new Promise((resolve, reject) => {
if (error) {
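
The diff is cut off inside mockLongRunningGrpcMethod, and mockSimpleGrpcMethod appears only in the hunk header above. A plausible sketch of such a simple gRPC mock, written as an assumption for illustration rather than the file's verbatim contents:

const assert = require('assert');

// Returns a fake unary method: verify the request, then invoke the callback
// with either the canned error or the canned response.
function mockSimpleGrpcMethod(expectedRequest, response, error) {
  return (actualRequest, options, callback) => {
    assert.deepStrictEqual(actualRequest, expectedRequest);
    if (error) {
      callback(error);
    } else if (response) {
      callback(null, response);
    } else {
      callback(null);
    }
  };
}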