This repository has been archived by the owner on Dec 19, 2023. It is now read-only.

docs(samples): updated samples code to use async await #118

Merged
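The conversion applied across all three samples is the same: the callback passed to the client call is dropped and the promise the call returns is awaited instead; it resolves to an array whose first element is the response. A minimal sketch of that pattern, assuming a client and request object constructed as in the samples below (the synthesize wrapper here is illustrative, not part of the PR):

async function synthesize(client, request) {
  // Before: client.synthesizeSpeech(request, (err, response) => { ... });
  // After: the call returns a promise that resolves to [response].
  const [response] = await client.synthesizeSpeech(request);
  return response.audioContent;
}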
168 changes: 81 additions & 87 deletions samples/audioProfile.js
@@ -14,8 +14,7 @@
*/

'use strict';

function synthesizeText(
async function synthesizeText(
text,
outputFile,
effectsProfileId,
@@ -27,6 +26,7 @@ function synthesizeText(
// Imports the Google Cloud client library
const speech = require('@google-cloud/text-to-speech');
const fs = require('fs');
const util = require('util');

// Creates a client
const client = new speech.TextToSpeechClient();
@@ -37,91 +37,85 @@ function synthesizeText(
audioConfig: {audioEncoding: 'MP3', effectsProfileId: effectsProfileId},
};

client.synthesizeSpeech(request, (err, response) => {
if (err) {
console.error(`ERROR:`, err);
return;
}

fs.writeFile(outputFile, response.audioContent, 'binary', err => {
if (err) {
console.error('ERROR:', err);
return;
}
console.log(`Audio content written to file: ${outputFile}`);
});
});
const [response] = await client.synthesizeSpeech(request);
const writeFile = util.promisify(fs.writeFile);
await writeFile(outputFile, response.audioContent, 'binary');
console.log(`Audio content written to file: ${outputFile}`);
// [END tts_synthesize_text_audio_profile_beta]
}

require(`yargs`)
.demand(1)
.command(
`synthesize <text>`,
`Synthesizes speech from the provided text.`,
{},
opts =>
synthesizeText(
opts.text,
opts.outputFile,
opts.effectsProfileId,
opts.languageCode,
opts.ssmlGender
)
)
.options({
text: {
alias: 't',
default: 'Hey Everybody! This is a test!',
global: true,
requiresArg: true,
type: 'string',
},
outputFile: {
alias: 'f',
default: './resources/test.mp3',
global: true,
requiresArg: false,
type: 'string',
},
effectsProfileId: {
alias: 'e',
default: 'telephony-class-application',
global: true,
requiresArg: true,
type: 'string',
},
languageCode: {
alias: 'l',
default: 'en-US',
global: true,
requiresArg: true,
type: 'string',
},
ssmlGender: {
alias: 'g',
default: 'FEMALE',
global: true,
requiresArg: true,
type: 'string',
},
})
.array(`effectsProfileId`)
.example(`node $0 synthesize "Enter Phrase to Test Here"`)
.example(
`node $0 synthesize "This is optimized for Phone" -f ./resources/phone.mp3 -e telephony-class-application -l en-US`
)
.example(
`node $0 synthesize "This is optimized for a Wearable, like a watch" -f ./resources/watch.mp3 -e wearable-class-device -l en-US`
)
.example(
`node $0 synthesize "This is optimized for Home Entertainment System" -f ./resources/homestereo.mp3 -e large-home-entertainment-class-device`
)
.example(
`node $0 synthesize "This is optimized for the Car" -f ./resources/car.mp3 -e large-automotive-class-device`
)
.wrap(120)
.recommendCommands()
.epilogue(`For more information, see https://cloud.google.com/speech/docs`)
.help()
.strict().argv;
async function main() {
require(`yargs`)
.demand(1)
.command(
`synthesize <text>`,
`Synthesizes speech from the provided text.`,
{},
opts =>
synthesizeText(
opts.text,
opts.outputFile,
opts.effectsProfileId,
opts.languageCode,
opts.ssmlGender
)
)
.options({
text: {
alias: 't',
default: 'Hey Everybody! This is a test!',
global: true,
requiresArg: true,
type: 'string',
},
outputFile: {
alias: 'f',
default: './resources/test.mp3',
global: true,
requiresArg: false,
type: 'string',
},
effectsProfileId: {
alias: 'e',
default: 'telephony-class-application',
global: true,
requiresArg: true,
type: 'string',
},
languageCode: {
alias: 'l',
default: 'en-US',
global: true,
requiresArg: true,
type: 'string',
},
ssmlGender: {
alias: 'g',
default: 'FEMALE',
global: true,
requiresArg: true,
type: 'string',
},
})
.array(`effectsProfileId`)
.example(`node $0 synthesize "Enter Phrase to Test Here"`)
.example(
`node $0 synthesize "This is optimized for Phone" -f ./resources/phone.mp3 -e telephony-class-application -l en-US`
)
.example(
`node $0 synthesize "This is optimized for a Wearable, like a watch" -f ./resources/watch.mp3 -e wearable-class-device -l en-US`
)
.example(
`node $0 synthesize "This is optimized for Home Entertainment System" -f ./resources/homestereo.mp3 -e large-home-entertainment-class-device`
)
.example(
`node $0 synthesize "This is optimized for the Car" -f ./resources/car.mp3 -e large-automotive-class-device`
)
.wrap(120)
.recommendCommands()
.epilogue(`For more information, see https://cloud.google.com/speech/docs`)
.help()
.strict().argv;
}

main().catch(console.error);
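The file write above goes through util.promisify so it can be awaited alongside the client call. A minimal, self-contained sketch of that piece in isolation, assuming Node.js 8+ (on Node.js 10+ fs.promises.writeFile would be an equivalent alternative, though the sample does not use it; the saveAudio helper is illustrative):

const fs = require('fs');
const util = require('util');

async function saveAudio(outputFile, audioContent) {
  // util.promisify turns callback-style fs.writeFile into a promise-returning function.
  const writeFile = util.promisify(fs.writeFile);
  // 'binary' preserves the raw MP3 bytes returned by the API.
  await writeFile(outputFile, audioContent, 'binary');
  console.log(`Audio content written to file: ${outputFile}`);
}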
56 changes: 26 additions & 30 deletions samples/listVoices.js
@@ -15,44 +15,40 @@

'use strict';

function listVoices() {
async function listVoices() {
// [START tts_list_voices]
const textToSpeech = require('@google-cloud/text-to-speech');

const client = new textToSpeech.TextToSpeechClient();

client
.listVoices({})
.then(results => {
const voices = results[0].voices;
const [result] = await client.listVoices({});
const voices = result.voices;

console.log('Voices:');
voices.forEach(voice => {
console.log(`Name: ${voice.name}`);
console.log(` SSML Voice Gender: ${voice.ssmlGender}`);
console.log(
` Natural Sample Rate Hertz: ${voice.naturalSampleRateHertz}`
);
console.log(` Supported languages:`);
voice.languageCodes.forEach(languageCode => {
console.log(` ${languageCode}`);
});
});
})
.catch(err => {
console.error('ERROR:', err);
console.log('Voices:');
voices.forEach(voice => {
console.log(`Name: ${voice.name}`);
console.log(` SSML Voice Gender: ${voice.ssmlGender}`);
console.log(` Natural Sample Rate Hertz: ${voice.naturalSampleRateHertz}`);
console.log(` Supported languages:`);
voice.languageCodes.forEach(languageCode => {
console.log(` ${languageCode}`);
});
});
// [END tts_list_voices]
}

async function main() {
require(`yargs`) // eslint-disable-line
.demand(1)
.command(`list-voices`, `List supported voices.`, {}, () => listVoices())
.example(`node $0 list-voices`)
.wrap(120)
.recommendCommands()
.epilogue(
`For more information, see https://cloud.google.com/text-to-speech/docs`
)
.help()
.strict().argv;
.demand(1)
.command(`list-voices`, `List supported voices.`, {}, () => listVoices())
.example(`node $0 list-voices`)
.wrap(120)
.recommendCommands()
.epilogue(
`For more information, see https://cloud.google.com/text-to-speech/docs`
)
.help()
.strict().argv;
}

main().catch(console.error);
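With the promise form, the response comes back as the first element of the resolved array, and result.voices holds the voice list. A minimal usage sketch of the updated call, assuming credentials are already configured for the client (the printVoiceNames helper is illustrative):

const textToSpeech = require('@google-cloud/text-to-speech');

async function printVoiceNames() {
  const client = new textToSpeech.TextToSpeechClient();
  // The promise resolves to an array; the response is its first element.
  const [result] = await client.listVoices({});
  for (const voice of result.voices) {
    console.log(`${voice.name} (${voice.languageCodes.join(', ')})`);
  }
}

printVoiceNames().catch(console.error);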
69 changes: 32 additions & 37 deletions samples/quickstart.js
@@ -15,41 +15,36 @@

'use strict';

// [START tts_quickstart]
const fs = require('fs');

// Imports the Google Cloud client library
const textToSpeech = require('@google-cloud/text-to-speech');

// Creates a client
const client = new textToSpeech.TextToSpeechClient();

// The text to synthesize
const text = 'Hello, world!';

// Construct the request
const request = {
input: {text: text},
// Select the language and SSML Voice Gender (optional)
voice: {languageCode: 'en-US', ssmlGender: 'NEUTRAL'},
// Select the type of audio encoding
audioConfig: {audioEncoding: 'MP3'},
};

// Performs the Text-to-Speech request
client.synthesizeSpeech(request, (err, response) => {
if (err) {
console.error('ERROR:', err);
return;
}

async function main() {
// [START tts_quickstart]
const fs = require('fs');
const util = require('util');

// Imports the Google Cloud client library
const textToSpeech = require('@google-cloud/text-to-speech');

// Creates a client
const client = new textToSpeech.TextToSpeechClient();

// The text to synthesize
const text = 'Hello, world!';

// Construct the request
const request = {
input: {text: text},
// Select the language and SSML Voice Gender (optional)
voice: {languageCode: 'en-US', ssmlGender: 'NEUTRAL'},
// Select the type of audio encoding
audioConfig: {audioEncoding: 'MP3'},
};

// Performs the Text-to-Speech request
const [response] = await client.synthesizeSpeech(request);
// Write the binary audio content to a local file
fs.writeFile('output.mp3', response.audioContent, 'binary', err => {
if (err) {
console.error('ERROR:', err);
return;
}
console.log('Audio content written to file: output.mp3');
});
});
// [END tts_quickstart]
const writeFile = util.promisify(fs.writeFile);
await writeFile('output.mp3', response.audioContent, 'binary');
console.log('Audio content written to file: output.mp3');
// [END tts_quickstart]
}

main().catch(console.error);
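Because everything now runs inside an async main(), any runtime failure (missing credentials, a rejected API call, a failed write) propagates as a rejected promise and is reported by the trailing .catch rather than by per-callback error checks. A minimal sketch of that top-level error-handling pattern on its own (the simulated error is illustrative):

async function main() {
  // Any throw or rejected await inside main() ends up here as a rejection.
  throw new Error('simulated failure, e.g. missing credentials');
}

// Without this catch, the error would surface as an unhandled promise rejection.
main().catch(console.error);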