Skip to content
This repository has been archived by the owner on Feb 18, 2024. It is now read-only.

Commit

Permalink
added review updates
Browse files Browse the repository at this point in the history
  • Loading branch information
vijay-qlogic committed Oct 26, 2018
1 parent 2df2695 commit 1e46881
Show file tree
Hide file tree
Showing 3 changed files with 55 additions and 79 deletions.
20 changes: 6 additions & 14 deletions samples/betaFeatures.js
Original file line number Diff line number Diff line change
Expand Up @@ -156,13 +156,9 @@ async function speechTranscribeMultiChannel(fileName) {

const [response] = await client.recognize(request);
// Build one line per result: "Channel Tag: <tag> <transcript>".
// The callback must *return* the formatted string — a block-bodied arrow
// (`result => { \`...\`; }`) discards the template literal and maps every
// entry to undefined, printing "undefined\nundefined…".
const transcription = response.results
  .map(
    result =>
      ` Channel Tag: ${result.channelTag} ${result.alternatives[0].transcript}`
  )
  .join('\n');
console.log(`Transcription: \n${transcription}`);
// [END speech_transcribe_multichannel_beta]
Expand Down Expand Up @@ -193,13 +189,9 @@ async function speechTranscribeMultichannelGCS(gcsUri) {

const [response] = await client.recognize(request);
// Build one line per result: "Channel Tag: <tag> <transcript>".
// The callback must *return* the formatted string — a block-bodied arrow
// (`result => { \`...\`; }`) discards the template literal and maps every
// entry to undefined, printing "undefined\nundefined…".
const transcription = response.results
  .map(
    result =>
      ` Channel Tag: ${result.channelTag} ${result.alternatives[0].transcript}`
  )
  .join('\n');
console.log(`Transcription: \n${transcription}`);
// [END speech_transcribe_multichannel_gcs_beta]
Expand Down
110 changes: 47 additions & 63 deletions samples/recognize.js
Original file line number Diff line number Diff line change
Expand Up @@ -103,16 +103,12 @@ async function syncRecognizeGCS(
audio: audio,
};

try {
// Detects speech in the audio file
const [response] = await client.recognize(request);
const transcription = response.results
.map(result => result.alternatives[0].transcript)
.join('\n');
console.log(`Transcription: `, transcription);
} catch (err) {
console.error('ERROR:', err);
}
// Detects speech in the audio file
const [response] = await client.recognize(request);
const transcription = response.results
.map(result => result.alternatives[0].transcript)
.join('\n');
console.log(`Transcription: `, transcription);
// [END speech_transcribe_sync_gcs]
}

Expand Down Expand Up @@ -153,29 +149,25 @@ async function syncRecognizeWords(
audio: audio,
};

try {
// Detects speech in the audio file
const [{results}] = await client.recognize(request);
results.forEach(result => {
console.log(`Transcription: `, result.alternatives[0].transcript);
result.alternatives[0].words.forEach(wordInfo => {
// NOTE: If you have a time offset exceeding 2^32 seconds, use the
// wordInfo.{x}Time.seconds.high to calculate seconds.
const startSecs =
`${wordInfo.startTime.seconds}` +
`.` +
wordInfo.startTime.nanos / 100000000;
const endSecs =
`${wordInfo.endTime.seconds}` +
`.` +
wordInfo.endTime.nanos / 100000000;
console.log(`Word: ${wordInfo.word}`);
console.log(`\t ${startSecs} secs - ${endSecs} secs`);
});
// Detects speech in the audio file
const [response] = await client.recognize(request);
// Print each transcript plus per-word timing information.
response.results.forEach(result => {
console.log(`Transcription: `, result.alternatives[0].transcript);
result.alternatives[0].words.forEach(wordInfo => {
// NOTE: If you have a time offset exceeding 2^32 seconds, use the
// wordInfo.{x}Time.seconds.high to calculate seconds.
// NOTE(review): `seconds` is interpolated via a template literal —
// presumably because it may be a Long-like object rather than a plain
// number; verify against the client library's protobuf types.
// NOTE(review): nanos / 100000000 only yields a single tenths digit when
// nanos is an exact multiple of 1e8; e.g. nanos = 50000000 concatenates
// to "1.0.5". Consider a proper fractional format — TODO confirm intent.
const startSecs =
`${wordInfo.startTime.seconds}` +
`.` +
wordInfo.startTime.nanos / 100000000;
const endSecs =
`${wordInfo.endTime.seconds}` +
`.` +
wordInfo.endTime.nanos / 100000000;
console.log(`Word: ${wordInfo.word}`);
console.log(`\t ${startSecs} secs - ${endSecs} secs`);
});
});
// [END speech_sync_recognize_words]
}

Expand Down Expand Up @@ -215,20 +207,16 @@ async function asyncRecognize(
audio: audio,
};

try {
// Detects speech in the audio file. This creates a recognition job that you
// can wait for now, or get its result later.
const [operation] = await client.longRunningRecognize(request);

// Get a Promise representation of the final result of the job
const [response] = await operation.promise();
const transcription = response.results
.map(result => result.alternatives[0].transcript)
.join('\n');
console.log(`Transcription: ${transcription}`);
} catch (err) {
console.error('ERROR:', err);
}
// Detects speech in the audio file. This creates a recognition job that you
// can wait for now, or get its result later.
const [operation] = await client.longRunningRecognize(request);

// Get a Promise representation of the final result of the job
const [response] = await operation.promise();
const transcription = response.results
.map(result => result.alternatives[0].transcript)
.join('\n');
console.log(`Transcription: ${transcription}`);
// [END speech_transcribe_async]
}

Expand Down Expand Up @@ -268,19 +256,15 @@ async function asyncRecognizeGCS(
audio: audio,
};

try {
// Detects speech in the audio file. This creates a recognition job that you
// can wait for now, or get its result later.
const [operation] = await client.longRunningRecognize(request);
// Get a Promise representation of the final result of the job
const [response] = await operation.promise();
const transcription = response.results
.map(result => result.alternatives[0].transcript)
.join('\n');
console.log(`Transcription: ${transcription}`);
} catch (err) {
console.error('ERROR:', err);
}
// Detects speech in the audio file. This creates a recognition job that you
// can wait for now, or get its result later.
const [operation] = await client.longRunningRecognize(request);
// Get a Promise representation of the final result of the job
const [response] = await operation.promise();
const transcription = response.results
.map(result => result.alternatives[0].transcript)
.join('\n');
console.log(`Transcription: ${transcription}`);
// [END speech_transcribe_async_gcs]
}

Expand Down Expand Up @@ -326,8 +310,8 @@ async function asyncRecognizeGCSWords(
const [operation] = await client.longRunningRecognize(request);

// Get a Promise representation of the final result of the job
const [{results}] = await operation.promise();
results.forEach(result => {
const [response] = await operation.promise();
response.results.forEach(result => {
console.log(`Transcription: ${result.alternatives[0].transcript}`);
result.alternatives[0].words.forEach(wordInfo => {
// NOTE: If you have a time offset exceeding 2^32 seconds, use the
Expand Down Expand Up @@ -643,8 +627,8 @@ async function syncRecognizeWithEnhancedModel(
};

// Detects speech in the audio file
const [{results}] = await client.recognize(request);
results.forEach(result => {
const [response] = await client.recognize(request);
response.results.forEach(result => {
const alternative = result.alternatives[0];
console.log(alternative.transcript);
});
Expand Down
4 changes: 2 additions & 2 deletions samples/recognize.v1p1beta1.js
Original file line number Diff line number Diff line change
Expand Up @@ -73,8 +73,8 @@ async function syncRecognizeWithMetaData(
};

// Detects speech in the audio file
const [{results}] = await client.recognize(request);
results.forEach(result => {
const [response] = await client.recognize(request);
response.results.forEach(result => {
const alternative = result.alternatives[0];
console.log(alternative.transcript);
});
Expand Down

0 comments on commit 1e46881

Please sign in to comment.