From 8e67073f070252bf24ef4660cde51d7c74319db4 Mon Sep 17 00:00:00 2001 From: Praveen Kumar Singh Date: Wed, 31 Oct 2018 17:05:40 +0530 Subject: [PATCH 1/3] replace promise with async await --- samples/analyze.v1.js | 315 ++++----- samples/analyze.v1beta2.js | 228 +++---- .../automl/automlNaturalLanguageDataset.js | 262 ++++---- samples/automl/automlNaturalLanguageModel.js | 608 +++++++++--------- .../automl/automlNaturalLanguagePredict.js | 44 +- samples/quickstart.js | 52 +- 6 files changed, 704 insertions(+), 805 deletions(-) diff --git a/samples/analyze.v1.js b/samples/analyze.v1.js index 590ddf30..5eee6647 100644 --- a/samples/analyze.v1.js +++ b/samples/analyze.v1.js @@ -15,7 +15,7 @@ 'use strict'; -function analyzeSentimentOfText(text) { +async function analyzeSentimentOfText(text) { // [START language_sentiment_text] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -35,28 +35,24 @@ function analyzeSentimentOfText(text) { }; // Detects the sentiment of the document - client - .analyzeSentiment({document: document}) - .then(results => { - const sentiment = results[0].documentSentiment; - console.log(`Document sentiment:`); - console.log(` Score: ${sentiment.score}`); - console.log(` Magnitude: ${sentiment.magnitude}`); - - const sentences = results[0].sentences; - sentences.forEach(sentence => { - console.log(`Sentence: ${sentence.text.content}`); - console.log(` Score: ${sentence.sentiment.score}`); - console.log(` Magnitude: ${sentence.sentiment.magnitude}`); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeSentiment({document: document}); + + const sentiment = result.documentSentiment; + console.log(`Document sentiment:`); + console.log(` Score: ${sentiment.score}`); + console.log(` Magnitude: ${sentiment.magnitude}`); + + const sentences = result.sentences; + sentences.forEach(sentence => { + console.log(`Sentence: ${sentence.text.content}`); + console.log(` Score: ${sentence.sentiment.score}`); + console.log(` Magnitude: ${sentence.sentiment.magnitude}`); + }); + // [END language_sentiment_text] } -function analyzeSentimentInFile(bucketName, fileName) { +async function analyzeSentimentInFile(bucketName, fileName) { // [START language_sentiment_gcs] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -77,28 +73,23 @@ function analyzeSentimentInFile(bucketName, fileName) { }; // Detects the sentiment of the document - client - .analyzeSentiment({document: document}) - .then(results => { - const sentiment = results[0].documentSentiment; - console.log(`Document sentiment:`); - console.log(` Score: ${sentiment.score}`); - console.log(` Magnitude: ${sentiment.magnitude}`); - - const sentences = results[0].sentences; - sentences.forEach(sentence => { - console.log(`Sentence: ${sentence.text.content}`); - console.log(` Score: ${sentence.sentiment.score}`); - console.log(` Magnitude: ${sentence.sentiment.magnitude}`); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeSentiment({document: document}); + + const sentiment = result.documentSentiment; + console.log(`Document sentiment:`); + console.log(` Score: ${sentiment.score}`); + console.log(` Magnitude: ${sentiment.magnitude}`); + + const sentences = result.sentences; + sentences.forEach(sentence => { + console.log(`Sentence: ${sentence.text.content}`); + console.log(` Score: ${sentence.sentiment.score}`); + console.log(` Magnitude: 
${sentence.sentiment.magnitude}`); + }); // [END language_sentiment_gcs] } -function analyzeEntitiesOfText(text) { +async function analyzeEntitiesOfText(text) { // [START language_entities_text] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -118,27 +109,22 @@ function analyzeEntitiesOfText(text) { }; // Detects entities in the document - client - .analyzeEntities({document: document}) - .then(results => { - const entities = results[0].entities; - - console.log('Entities:'); - entities.forEach(entity => { - console.log(entity.name); - console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); - if (entity.metadata && entity.metadata.wikipedia_url) { - console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); - } - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeEntities({document: document}); + + const entities = result.entities; + + console.log('Entities:'); + entities.forEach(entity => { + console.log(entity.name); + console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); + if (entity.metadata && entity.metadata.wikipedia_url) { + console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); + } + }); // [END language_entities_text] } -function analyzeEntitiesInFile(bucketName, fileName) { +async function analyzeEntitiesInFile(bucketName, fileName) { // [START language_entities_gcs] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -159,27 +145,22 @@ function analyzeEntitiesInFile(bucketName, fileName) { }; // Detects entities in the document - client - .analyzeEntities({document: document}) - .then(results => { - const entities = results[0].entities; - - console.log('Entities:'); - entities.forEach(entity => { - console.log(entity.name); - console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); - if (entity.metadata && entity.metadata.wikipedia_url) { - console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); - } - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeEntities({document: document}); + const entities = result.entities; + + console.log('Entities:'); + entities.forEach(entity => { + console.log(entity.name); + console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); + if (entity.metadata && entity.metadata.wikipedia_url) { + console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); + } + }); + // [END language_entities_gcs] } -function analyzeSyntaxOfText(text) { +async function analyzeSyntaxOfText(text) { // [START language_syntax_text] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -199,24 +180,17 @@ function analyzeSyntaxOfText(text) { }; // Detects syntax in the document - client - .analyzeSyntax({document: document}) - .then(results => { - const syntax = results[0]; - - console.log('Tokens:'); - syntax.tokens.forEach(part => { - console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); - console.log(`Morphology:`, part.partOfSpeech); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [syntax] = await client.analyzeSyntax({document: document}); + + console.log('Tokens:'); + syntax.tokens.forEach(part => { + console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); + console.log(`Morphology:`, part.partOfSpeech); + }); // [END language_syntax_text] } -function analyzeSyntaxInFile(bucketName, 
fileName) {
+async function analyzeSyntaxInFile(bucketName, fileName) {
   // [START language_syntax_gcs]
   // Imports the Google Cloud client library
   const language = require('@google-cloud/language');
@@ -237,24 +211,17 @@ function analyzeSyntaxInFile(bucketName, fileName) {
   };
 
   // Detects syntax in the document
-  client
-    .analyzeSyntax({document: document})
-    .then(results => {
-      const syntax = results[0];
-
-      console.log('Parts of speech:');
-      syntax.tokens.forEach(part => {
-        console.log(`${part.partOfSpeech.tag}: ${part.text.content}`);
-        console.log(`Morphology:`, part.partOfSpeech);
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [syntax] = await client.analyzeSyntax({document: document});
+
+  console.log('Parts of speech:');
+  syntax.tokens.forEach(part => {
+    console.log(`${part.partOfSpeech.tag}: ${part.text.content}`);
+    console.log(`Morphology:`, part.partOfSpeech);
+  });
   // [END language_syntax_gcs]
 }
 
-function analyzeEntitySentimentOfText(text) {
+async function analyzeEntitySentimentOfText(text) {
   // [START language_entity_sentiment_text]
   // Imports the Google Cloud client library
   const language = require('@google-cloud/language');
@@ -274,26 +241,20 @@ function analyzeEntitySentimentOfText(text) {
   };
 
   // Detects sentiment of entities in the document
-  client
-    .analyzeEntitySentiment({document: document})
-    .then(results => {
-      const entities = results[0].entities;
-
-      console.log(`Entities and sentiments:`);
-      entities.forEach(entity => {
-        console.log(`  Name: ${entity.name}`);
-        console.log(`  Type: ${entity.type}`);
-        console.log(`  Score: ${entity.sentiment.score}`);
-        console.log(`  Magnitude: ${entity.sentiment.magnitude}`);
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [result] = await client.analyzeEntitySentiment({document: document});
+  const entities = result.entities;
+
+  console.log(`Entities and sentiments:`);
+  entities.forEach(entity => {
+    console.log(`  Name: ${entity.name}`);
+    console.log(`  Type: ${entity.type}`);
+    console.log(`  Score: ${entity.sentiment.score}`);
+    console.log(`  Magnitude: ${entity.sentiment.magnitude}`);
+  });
   // [END language_entity_sentiment_text]
 }
 
-function analyzeEntitySentimentInFile(bucketName, fileName) {
+async function analyzeEntitySentimentInFile(bucketName, fileName) {
   // [START language_entity_sentiment_gcs]
   // Imports the Google Cloud client library
   const language = require('@google-cloud/language');
@@ -314,26 +275,20 @@ function analyzeEntitySentimentInFile(bucketName, fileName) {
   };
 
   // Detects sentiment of entities in the document
-  client
-    .analyzeEntitySentiment({document: document})
-    .then(results => {
-      const entities = results[0].entities;
-
-      console.log(`Entities and sentiments:`);
-      entities.forEach(entity => {
-        console.log(`  Name: ${entity.name}`);
-        console.log(`  Type: ${entity.type}`);
-        console.log(`  Score: ${entity.sentiment.score}`);
-        console.log(`  Magnitude: ${entity.sentiment.magnitude}`);
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [result] = await client.analyzeEntitySentiment({document: document});
+  const entities = result.entities;
+
+  console.log(`Entities and sentiments:`);
+  entities.forEach(entity => {
+    console.log(`  Name: ${entity.name}`);
+    console.log(`  Type: ${entity.type}`);
+    console.log(`  Score: ${entity.sentiment.score}`);
+    console.log(`  Magnitude: ${entity.sentiment.magnitude}`);
+  });
   // [END language_entity_sentiment_gcs]
 }
 
-function classifyTextOfText(text) {
+async function classifyTextOfText(text) {
   // [START 
language_classify_text] // Imports the Google Cloud client library const language = require('@google-cloud/language'); @@ -353,25 +308,15 @@ function classifyTextOfText(text) { }; // Classifies text in the document - client - .classifyText({document: document}) - .then(results => { - const classification = results[0]; - - console.log('Categories:'); - classification.categories.forEach(category => { - console.log( - `Name: ${category.name}, Confidence: ${category.confidence}` - ); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [classification] = await client.classifyText({document: document}); + console.log('Categories:'); + classification.categories.forEach(category => { + console.log(`Name: ${category.name}, Confidence: ${category.confidence}`); + }); // [END language_classify_text] } -function classifyTextInFile(bucketName, fileName) { +async function classifyTextInFile(bucketName, fileName) { // [START language_classify_gcs] // Imports the Google Cloud client library. const language = require('@google-cloud/language'); @@ -392,21 +337,12 @@ function classifyTextInFile(bucketName, fileName) { }; // Classifies text in the document - client - .classifyText({document: document}) - .then(results => { - const classification = results[0]; - - console.log('Categories:'); - classification.categories.forEach(category => { - console.log( - `Name: ${category.name}, Confidence: ${category.confidence}` - ); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [classification] = await client.classifyText({document: document}); + + console.log('Categories:'); + classification.categories.forEach(category => { + console.log(`Name: ${category.name}, Confidence: ${category.confidence}`); + }); // [END language_classify_gcs] } @@ -416,22 +352,31 @@ require(`yargs`) `sentiment-text `, `Detects sentiment of a string.`, {}, - opts => analyzeSentimentOfText(opts.text) + async opts => await analyzeSentimentOfText(opts.text).catch(console.error) ) .command( `sentiment-file `, `Detects sentiment in a file in Google Cloud Storage.`, {}, - opts => analyzeSentimentInFile(opts.bucketName, opts.fileName) + async opts => + await analyzeSentimentInFile(opts.bucketName, opts.fileName).catch( + console.error + ) ) - .command(`entities-text `, `Detects entities in a string.`, {}, opts => - analyzeEntitiesOfText(opts.text) + .command( + `entities-text `, + `Detects entities in a string.`, + {}, + async opts => await analyzeEntitiesOfText(opts.text).catch(console.error) ) .command( `entities-file `, `Detects entities in a file in Google Cloud Storage.`, {}, - opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName) + async opts => + await analyzeEntitiesInFile(opts.bucketName, opts.fileName).catch( + console.error + ) ) .command(`syntax-text `, `Detects syntax of a string.`, {}, opts => analyzeSyntaxOfText(opts.text) @@ -440,28 +385,36 @@ require(`yargs`) `syntax-file `, `Detects syntax in a file in Google Cloud Storage.`, {}, - opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName) + async opts => + await analyzeSyntaxInFile(opts.bucketName, opts.fileName).catch( + console.error + ) ) .command( `entity-sentiment-text `, `Detects sentiment of the entities in a string.`, {}, - opts => analyzeEntitySentimentOfText(opts.text) + async opts => + await analyzeEntitySentimentOfText(opts.text).catch(console.error) ) .command( `entity-sentiment-file `, `Detects sentiment of the entities in a file in Google Cloud Storage.`, {}, - opts => 
analyzeEntitySentimentInFile(opts.bucketName, opts.fileName) + async opts => + await analyzeEntitySentimentInFile(opts.bucketName, opts.fileName) ) - .command(`classify-text `, `Classifies text of a string.`, {}, opts => - classifyTextOfText(opts.text) + .command( + `classify-text `, + `Classifies text of a string.`, + {}, + async opts => await classifyTextOfText(opts.text) ) .command( `classify-file `, `Classifies text in a file in Google Cloud Storage.`, {}, - opts => classifyTextInFile(opts.bucketName, opts.fileName) + async opts => await classifyTextInFile(opts.bucketName, opts.fileName) ) .example( `node $0 sentiment-text "President Obama is speaking at the White House."` diff --git a/samples/analyze.v1beta2.js b/samples/analyze.v1beta2.js index 18de6fa5..158f6673 100644 --- a/samples/analyze.v1beta2.js +++ b/samples/analyze.v1beta2.js @@ -15,7 +15,7 @@ 'use strict'; -function analyzeSentimentOfText(text) { +async function analyzeSentimentOfText(text) { // [START language_sentiment_string] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -35,28 +35,22 @@ function analyzeSentimentOfText(text) { }; // Detects the sentiment of the document - client - .analyzeSentiment({document: document}) - .then(results => { - const sentiment = results[0].documentSentiment; - console.log(`Document sentiment:`); - console.log(` Score: ${sentiment.score}`); - console.log(` Magnitude: ${sentiment.magnitude}`); - - const sentences = results[0].sentences; - sentences.forEach(sentence => { - console.log(`Sentence: ${sentence.text.content}`); - console.log(` Score: ${sentence.sentiment.score}`); - console.log(` Magnitude: ${sentence.sentiment.magnitude}`); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeSentiment({document: document}); + const sentiment = result.documentSentiment; + console.log(`Document sentiment:`); + console.log(` Score: ${sentiment.score}`); + console.log(` Magnitude: ${sentiment.magnitude}`); + + const sentences = result.sentences; + sentences.forEach(sentence => { + console.log(`Sentence: ${sentence.text.content}`); + console.log(` Score: ${sentence.sentiment.score}`); + console.log(` Magnitude: ${sentence.sentiment.magnitude}`); + }); // [END language_sentiment_string] } -function analyzeSentimentInFile(bucketName, fileName) { +async function analyzeSentimentInFile(bucketName, fileName) { // [START language_sentiment_file] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -77,28 +71,23 @@ function analyzeSentimentInFile(bucketName, fileName) { }; // Detects the sentiment of the document - client - .analyzeSentiment({document: document}) - .then(results => { - const sentiment = results[0].documentSentiment; - console.log(`Document sentiment:`); - console.log(` Score: ${sentiment.score}`); - console.log(` Magnitude: ${sentiment.magnitude}`); - - const sentences = results[0].sentences; - sentences.forEach(sentence => { - console.log(`Sentence: ${sentence.text.content}`); - console.log(` Score: ${sentence.sentiment.score}`); - console.log(` Magnitude: ${sentence.sentiment.magnitude}`); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeSentiment({document: document}); + const sentiment = result.documentSentiment; + console.log(`Document sentiment:`); + console.log(` Score: ${sentiment.score}`); + console.log(` Magnitude: ${sentiment.magnitude}`); + + const sentences = 
result.sentences; + sentences.forEach(sentence => { + console.log(`Sentence: ${sentence.text.content}`); + console.log(` Score: ${sentence.sentiment.score}`); + console.log(` Magnitude: ${sentence.sentiment.magnitude}`); + }); + // [END language_sentiment_file] } -function analyzeEntitiesOfText(text) { +async function analyzeEntitiesOfText(text) { // [START language_entities_string] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -118,27 +107,22 @@ function analyzeEntitiesOfText(text) { }; // Detects entities in the document - client - .analyzeEntities({document: document}) - .then(results => { - const entities = results[0].entities; - - console.log('Entities:'); - entities.forEach(entity => { - console.log(entity.name); - console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); - if (entity.metadata && entity.metadata.wikipedia_url) { - console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); - } - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeEntities({document: document}); + const entities = result.entities; + + console.log('Entities:'); + entities.forEach(entity => { + console.log(entity.name); + console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); + if (entity.metadata && entity.metadata.wikipedia_url) { + console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); + } + }); + // [END language_entities_string] } -function analyzeEntitiesInFile(bucketName, fileName) { +async function analyzeEntitiesInFile(bucketName, fileName) { // [START language_entities_file] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -159,27 +143,22 @@ function analyzeEntitiesInFile(bucketName, fileName) { }; // Detects entities in the document - client - .analyzeEntities({document: document}) - .then(results => { - const entities = results[0].entities; - - console.log('Entities:'); - entities.forEach(entity => { - console.log(entity.name); - console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); - if (entity.metadata && entity.metadata.wikipedia_url) { - console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); - } - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [result] = await client.analyzeEntities({document: document}); + const entities = result.entities; + + console.log('Entities:'); + entities.forEach(entity => { + console.log(entity.name); + console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`); + if (entity.metadata && entity.metadata.wikipedia_url) { + console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}$`); + } + }); + // [END language_entities_file] } -function analyzeSyntaxOfText(text) { +async function analyzeSyntaxOfText(text) { // [START language_syntax_string] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -199,24 +178,18 @@ function analyzeSyntaxOfText(text) { }; // Detects syntax in the document - client - .analyzeSyntax({document: document}) - .then(results => { - const syntax = results[0]; + const [syntax] = await client.analyzeSyntax({document: document}); + + console.log('Parts of speech:'); + syntax.tokens.forEach(part => { + console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); + console.log(`Morphology:`, part.partOfSpeech); + }); - console.log('Parts of speech:'); - syntax.tokens.forEach(part => { - 
console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); - console.log(`Morphology:`, part.partOfSpeech); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); // [END language_syntax_string] } -function analyzeSyntaxInFile(bucketName, fileName) { +async function analyzeSyntaxInFile(bucketName, fileName) { // [START language_syntax_file] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -237,24 +210,17 @@ function analyzeSyntaxInFile(bucketName, fileName) { }; // Detects syntax in the document - client - .analyzeSyntax({document: document}) - .then(results => { - const syntax = results[0]; + const [syntax] = await client.analyzeSyntax({document: document}); - console.log('Parts of speech:'); - syntax.tokens.forEach(part => { - console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); - console.log(`Morphology:`, part.partOfSpeech); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + console.log('Parts of speech:'); + syntax.tokens.forEach(part => { + console.log(`${part.partOfSpeech.tag}: ${part.text.content}`); + console.log(`Morphology:`, part.partOfSpeech); + }); // [END language_syntax_file] } -function classifyTextOfText(text) { +async function classifyTextOfText(text) { // [START language_classify_string] // Imports the Google Cloud client library const language = require('@google-cloud/language').v1beta2; @@ -274,21 +240,11 @@ function classifyTextOfText(text) { }; // Classifies text in the document - client - .classifyText({document: document}) - .then(results => { - const classification = results[0]; - - console.log('Categories:'); - classification.categories.forEach(category => { - console.log( - `Name: ${category.name}, Confidence: ${category.confidence}` - ); - }); - }) - .catch(err => { - console.error('ERROR:', err); - }); + const [classification] = await client.classifyText({document: document}); + console.log('Categories:'); + classification.categories.forEach(category => { + console.log(`Name: ${category.name}, Confidence: ${category.confidence}`); + }); // [END language_classify_string] } @@ -337,31 +293,46 @@ require(`yargs`) `sentiment-text `, `Detects sentiment of a string.`, {}, - opts => analyzeSentimentOfText(opts.text) + async opts => await analyzeSentimentOfText(opts.text).catch(console.error) ) .command( `sentiment-file `, `Detects sentiment in a file in Google Cloud Storage.`, {}, - opts => analyzeSentimentInFile(opts.bucketName, opts.fileName) + async opts => + await analyzeSentimentInFile(opts.bucketName, opts.fileName).catch( + console.error + ) ) - .command(`entities-text `, `Detects entities in a string.`, {}, opts => - analyzeEntitiesOfText(opts.text) + .command( + `entities-text `, + `Detects entities in a string.`, + {}, + async opts => await analyzeEntitiesOfText(opts.text).catch(console.error) ) .command( `entities-file `, `Detects entities in a file in Google Cloud Storage.`, {}, - opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName) + async opts => + await analyzeEntitiesInFile(opts.bucketName, opts.fileName).catch( + console.error + ) ) - .command(`syntax-text `, `Detects syntax of a string.`, {}, opts => - analyzeSyntaxOfText(opts.text) + .command( + `syntax-text `, + `Detects syntax of a string.`, + {}, + async opts => await analyzeSyntaxOfText(opts.text).catch(console.error) ) .command( `syntax-file `, `Detects syntax in a file in Google Cloud Storage.`, {}, - opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName) + async opts => + await 
analyzeSyntaxInFile(opts.bucketName, opts.fileName).catch( + console.error + ) ) .command(`classify-text `, `Classifies text of a string.`, {}, opts => classifyTextOfText(opts.text) @@ -370,7 +341,10 @@ require(`yargs`) `classify-file `, `Classifies text in a file in Google Cloud Storage.`, {}, - opts => classifyTextInFile(opts.bucketName, opts.fileName) + async opts => + await classifyTextInFile(opts.bucketName, opts.fileName).catch( + console.error + ) ) .example( `node $0 sentiment-text "President Obama is speaking at the White House."` diff --git a/samples/automl/automlNaturalLanguageDataset.js b/samples/automl/automlNaturalLanguageDataset.js index 65c4db8b..a5d0cd10 100755 --- a/samples/automl/automlNaturalLanguageDataset.js +++ b/samples/automl/automlNaturalLanguageDataset.js @@ -23,7 +23,12 @@ `use strict`; -function createDataset(projectId, computeRegion, datasetName, multilabel) { +async function createDataset( + projectId, + computeRegion, + datasetName, + multilabel +) { // [START automl_natural_language_createDataset] const automl = require(`@google-cloud/automl`); @@ -55,31 +60,26 @@ function createDataset(projectId, computeRegion, datasetName, multilabel) { }; // Create a dataset with the dataset metadata in the region. - client - .createDataset({parent: projectLocation, dataset: myDataset}) - .then(responses => { - const dataset = responses[0]; - - // Display the dataset information. - console.log(`Dataset name: ${dataset.name}`); - console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); - console.log(`Dataset display name: ${dataset.displayName}`); - console.log(`Dataset example count: ${dataset.exampleCount}`); - console.log(`Text classification type:`); - console.log( - `\t ${dataset.textClassificationDatasetMetadata.classificationType}` - ); - console.log(`Dataset create time:`); - console.log(`\tseconds: ${dataset.createTime.seconds}`); - console.log(`\tnanos: ${dataset.createTime.nanos}`); - }) - .catch(err => { - console.error(err); - }); + const [dataset] = await client.createDataset({ + parent: projectLocation, + dataset: myDataset, + }); + // Display the dataset information. + console.log(`Dataset name: ${dataset.name}`); + console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); + console.log(`Dataset display name: ${dataset.displayName}`); + console.log(`Dataset example count: ${dataset.exampleCount}`); + console.log(`Text classification type:`); + console.log( + `\t ${dataset.textClassificationDatasetMetadata.classificationType}` + ); + console.log(`Dataset create time:`); + console.log(`\tseconds: ${dataset.createTime.seconds}`); + console.log(`\tnanos: ${dataset.createTime.nanos}`); // [END automl_natural_language_createDataset] } -function listDatasets(projectId, computeRegion, filter) { +async function listDatasets(projectId, computeRegion, filter) { // [START automl_natural_language_listDatasets] const automl = require(`@google-cloud/automl`); @@ -96,35 +96,30 @@ function listDatasets(projectId, computeRegion, filter) { const projectLocation = client.locationPath(projectId, computeRegion); // List all the datasets available in the region by applying filter. - client - .listDatasets({parent: projectLocation, filter: filter}) - .then(responses => { - const datasets = responses[0]; - - // Display the dataset information. 
- console.log(`List of datasets:`); - datasets.forEach(dataset => { - console.log(`Dataset name: ${dataset.name}`); - console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); - console.log(`Dataset display name: ${dataset.displayName}`); - console.log(`Dataset example count: ${dataset.exampleCount}`); - console.log(`Text classification type:`); - console.log( - `\t ${dataset.textClassificationDatasetMetadata.classificationType}` - ); - console.log(`Dataset create time: `); - console.log(`\tseconds: ${dataset.createTime.seconds}`); - console.log(`\tnanos: ${dataset.createTime.nanos}`); - console.log(`\n`); - }); - }) - .catch(err => { - console.error(err); - }); + const [datasets] = await client.listDatasets({ + parent: projectLocation, + filter: filter, + }); + // Display the dataset information. + console.log(`List of datasets:`); + datasets.forEach(dataset => { + console.log(`Dataset name: ${dataset.name}`); + console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); + console.log(`Dataset display name: ${dataset.displayName}`); + console.log(`Dataset example count: ${dataset.exampleCount}`); + console.log(`Text classification type:`); + console.log( + `\t ${dataset.textClassificationDatasetMetadata.classificationType}` + ); + console.log(`Dataset create time: `); + console.log(`\tseconds: ${dataset.createTime.seconds}`); + console.log(`\tnanos: ${dataset.createTime.nanos}`); + console.log(`\n`); + }); // [END automl_natural_language_listDatasets] } -function getDataset(projectId, computeRegion, datasetId) { +async function getDataset(projectId, computeRegion, datasetId) { // [START automl_natural_language_getDataset] const automl = require(`@google-cloud/automl`); @@ -141,32 +136,24 @@ function getDataset(projectId, computeRegion, datasetId) { const datasetFullId = client.datasetPath(projectId, computeRegion, datasetId); // Get complete detail of the dataset. - client - .getDataset({name: datasetFullId}) - .then(responses => { - const dataset = responses[0]; - - // Display the dataset information. - console.log(`Dataset name: ${dataset.name}`); - console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`); - console.log(`Dataset display name: ${dataset.displayName}`); - console.log(`Dataset example count: ${dataset.exampleCount}`); - console.log( - `Text classification type: ${ - dataset.textClassificationDatasetMetadata.classificationType - }` - ); - console.log(`Dataset create time: `); - console.log(`\tseconds: ${dataset.createTime.seconds}`); - console.log(`\tnanos: ${dataset.createTime.nanos}`); - }) - .catch(err => { - console.error(err); - }); + const [dataset] = await client.getDataset({name: datasetFullId}); + // Display the dataset information. 
+  console.log(`Dataset name: ${dataset.name}`);
+  console.log(`Dataset id: ${dataset.name.split(`/`).pop(-1)}`);
+  console.log(`Dataset display name: ${dataset.displayName}`);
+  console.log(`Dataset example count: ${dataset.exampleCount}`);
+  console.log(
+    `Text classification type: ${
+      dataset.textClassificationDatasetMetadata.classificationType
+    }`
+  );
+  console.log(`Dataset create time: `);
+  console.log(`\tseconds: ${dataset.createTime.seconds}`);
+  console.log(`\tnanos: ${dataset.createTime.nanos}`);
   // [END automl_natural_language_getDataset]
 }
 
-function importData(projectId, computeRegion, datasetId, path) {
+async function importData(projectId, computeRegion, datasetId, path) {
   // [START automl_natural_language_importDataset]
   const automl = require(`@google-cloud/automl`);
 
@@ -192,24 +179,19 @@ function importData(projectId, computeRegion, datasetId, path) {
   };
 
   // Import the dataset from the input URI.
-  client
-    .importData({name: datasetFullId, inputConfig: inputConfig})
-    .then(responses => {
-      const operation = responses[0];
-      console.log(`Processing import...`);
-      return operation.promise();
-    })
-    .then(responses => {
-      // The final result of the operation.
-      if (responses[2].done === true) console.log(`Data imported.`);
-    })
-    .catch(err => {
-      console.error(err);
-    });
+  const [operation] = await client.importData({
+    name: datasetFullId,
+    inputConfig: inputConfig,
+  });
+  console.log(`Processing import...`);
+  const response = await operation.promise();
+  // The final result of the operation.
+  if (response[2].done === true) console.log(`Data imported.`);
+
   // [END automl_natural_language_importDataset]
 }
 
-function exportData(projectId, computeRegion, datasetId, outputUri) {
+async function exportData(projectId, computeRegion, datasetId, outputUri) {
   // [START automl_natural_language_exportDataset]
   const automl = require(`@google-cloud/automl`);
 
@@ -234,24 +216,18 @@ function exportData(projectId, computeRegion, datasetId, outputUri) {
   };
 
   // Export the data to the output URI.
-  client
-    .exportData({name: datasetFullId, outputConfig: outputConfig})
-    .then(responses => {
-      const operation = responses[0];
-      console.log(`Processing export...`);
-      return operation.promise();
-    })
-    .then(responses => {
-      // The final result of the operation.
-      if (responses[2].done === true) console.log(`Data exported.`);
-    })
-    .catch(err => {
-      console.error(err);
-    });
+  const [operation] = await client.exportData({
+    name: datasetFullId,
+    outputConfig: outputConfig,
+  });
+  console.log(`Processing export...`);
+  const response = await operation.promise();
+  // The final result of the operation.
+  if (response[2].done === true) console.log(`Data exported.`);
   // [END automl_natural_language_exportDataset]
 }
 
-function deleteDataset(projectId, computeRegion, datasetId) {
+async function deleteDataset(projectId, computeRegion, datasetId) {
   // [START automl_natural_language_deleteDataset]
   const automl = require(`@google-cloud/automl`);
 
@@ -268,19 +244,10 @@ function deleteDataset(projectId, computeRegion, datasetId) {
   const datasetFullId = client.datasetPath(projectId, computeRegion, datasetId);
 
   // Delete a dataset.
-  client
-    .deleteDataset({name: datasetFullId})
-    .then(responses => {
-      const operation = responses[0];
-      return operation.promise();
-    })
-    .then(responses => {
-      // The final result of the operation. 
- if (responses[2].done === true) console.log(`Dataset deleted.`); - }) - .catch(err => { - console.error(err); - }); + const [operation] = await client.deleteDataset({name: datasetFullId}); + const response = await operation.promise(); + // The final result of the operation. + if (response[2].done === true) console.log(`Dataset deleted.`); // [END automl_natural_language_deleteDataset] } @@ -345,32 +312,63 @@ require(`yargs`) description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`, }, }) - .command(`create-dataset`, `creates a new Dataset`, {}, opts => - createDataset( - opts.projectId, - opts.computeRegion, - opts.datasetName, - opts.multilabel - ) + .command( + `create-dataset`, + `creates a new Dataset`, + {}, + async opts => + await createDataset( + opts.projectId, + opts.computeRegion, + opts.datasetName, + opts.multilabel + ).catch(console.error) ) - .command(`list-datasets`, `list all Datasets`, {}, opts => - listDatasets(opts.projectId, opts.computeRegion, opts.filter) + .command( + `list-datasets`, + `list all Datasets`, + {}, + async opts => + await listDatasets(opts.projectId, opts.computeRegion, opts.filter).catch( + console.error + ) ) - .command(`get-dataset`, `Get a Dataset`, {}, opts => - getDataset(opts.projectId, opts.computeRegion, opts.datasetId) + .command( + `get-dataset`, + `Get a Dataset`, + {}, + async opts => + await getDataset( + opts.projectId, + opts.computeRegion, + opts.datasetId + ).catch(console.error) ) - .command(`delete-dataset`, `Delete a dataset`, {}, opts => - deleteDataset(opts.projectId, opts.computeRegion, opts.datasetId) + .command( + `delete-dataset`, + `Delete a dataset`, + {}, + async opts => + await deleteDataset(opts.projectId, opts.computeRegion, opts.datasetId) ) - .command(`import-data`, `Import labeled items into dataset`, {}, opts => - importData(opts.projectId, opts.computeRegion, opts.datasetId, opts.path) + .command( + `import-data`, + `Import labeled items into dataset`, + {}, + async opts => + await importData( + opts.projectId, + opts.computeRegion, + opts.datasetId, + opts.path + ) ) .command( `export-data`, `Export a dataset to a Google Cloud Storage Bucket`, {}, - opts => - exportData( + async opts => + await exportData( opts.projectId, opts.computeRegion, opts.datasetId, diff --git a/samples/automl/automlNaturalLanguageModel.js b/samples/automl/automlNaturalLanguageModel.js index a20472b8..6dcb175f 100755 --- a/samples/automl/automlNaturalLanguageModel.js +++ b/samples/automl/automlNaturalLanguageModel.js @@ -23,7 +23,7 @@ `use strict`; -function createModel(projectId, computeRegion, datasetId, modelName) { +async function createModel(projectId, computeRegion, datasetId, modelName) { // [START automl_natural_language_createModel] const automl = require(`@google-cloud/automl`); @@ -48,44 +48,33 @@ function createModel(projectId, computeRegion, datasetId, modelName) { }; // Create a model with the model metadata in the region. - client - .createModel({parent: projectLocation, model: myModel}) - .then(responses => { - const operation = responses[0]; - const initialApiResponse = responses[1]; - - console.log(`Training operation name: ${initialApiResponse.name}`); - console.log(`Training started...`); - return operation.promise(); - }) - .then(responses => { - // The final result of the operation. - const model = responses[0]; - - // Retrieve deployment state. 
- let deploymentState = ``; - if (model.deploymentState === 1) { - deploymentState = `deployed`; - } else if (model.deploymentState === 2) { - deploymentState = `undeployed`; - } - - // Display the model information. - console.log(`Model name: ${model.name}`); - console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); - console.log(`Model display name: ${model.displayName}`); - console.log(`Model create time:`); - console.log(`\tseconds: ${model.createTime.seconds}`); - console.log(`\tnanos: ${model.createTime.nanos}`); - console.log(`Model deployment state: ${deploymentState}`); - }) - .catch(err => { - console.error(err); - }); + const [operation, initialApiResponse] = await client.createModel({ + parent: projectLocation, + model: myModel, + }); + console.log(`Training operation name: ${initialApiResponse.name}`); + console.log(`Training started...`); + const [model] = await operation.promise(); + // Retrieve deployment state. + let deploymentState = ``; + if (model.deploymentState === 1) { + deploymentState = `deployed`; + } else if (model.deploymentState === 2) { + deploymentState = `undeployed`; + } + + // Display the model information. + console.log(`Model name: ${model.name}`); + console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); + console.log(`Model display name: ${model.displayName}`); + console.log(`Model create time:`); + console.log(`\tseconds: ${model.createTime.seconds}`); + console.log(`\tnanos: ${model.createTime.nanos}`); + console.log(`Model deployment state: ${deploymentState}`); // [END automl_natural_language_createModel] } -function getOperationStatus(operationFullId) { +async function getOperationStatus(operationFullId) { // [START automl_natural_language_getOperationStatus] const automl = require(`@google-cloud/automl`); @@ -98,14 +87,14 @@ function getOperationStatus(operationFullId) { // Get the latest state of a long-running operation. // Get the latest state of a long-running operation. - client.operationsClient.getOperation(operationFullId).then(responses => { - const response = responses[0]; - console.log(`Operation status: ${response}`); - }); + const [response] = await client.operationsClient.getOperation( + operationFullId + ); + console.log(`Operation status: ${response}`); // [END automl_natural_language_getOperationStatus] } -function listModels(projectId, computeRegion, filter) { +async function listModels(projectId, computeRegion, filter) { // [START automl_natural_language_listModels] const automl = require(`@google-cloud/automl`); @@ -123,80 +112,65 @@ function listModels(projectId, computeRegion, filter) { // List all the models available in the region by applying filter. if (filter === ``) filter = `textClassificationModelMetadata:*`; - client - .listModels({ - parent: projectLocation, - filter: filter, - }) - .then(responses => { - const models = responses[0]; - - // Display the model information. 
- console.log(`List of models:`); - models.forEach(model => { - console.log(`Model name: ${model.name}`); - console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); - console.log(`Model display name: ${model.displayName}`); - console.log(`Model dataset id: ${model.datasetId}`); - if (model.modelMetadata === `translationModelMetadata`) { - console.log(`Translation model metadata:`); - console.log( - `\tBase model: ${model.translationModelMetadata.baseModel}` - ); - console.log( - `\tSource language code: ${ - model.translationModelMetadata.sourceLanguageCode - }` - ); - console.log( - `\tTarget language code: ${ - model.translationModelMetadata.targetLanguageCode - }` - ); - } else if (model.modelMetadata === `textClassificationModelMetadata`) { - console.log( - `Text classification model metadata: ${ - model.textClassificationModelMetadata - }` - ); - } else if (model.modelMetadata === `imageClassificationModelMetadata`) { - console.log(`Image classification model metadata:`); - console.log( - `\tBase model id: ${ - model.imageClassificationModelMetadata.baseModelId - }` - ); - console.log( - `\tTrain budget: ${ - model.imageClassificationModelMetadata.trainBudget - }` - ); - console.log( - `\tTrain cost: ${model.imageClassificationModelMetadata.trainCost}` - ); - console.log( - `\tStop reason: ${ - model.imageClassificationModelMetadata.stopReason - }` - ); - } - console.log(`Model create time:`); - console.log(`\tseconds: ${model.createTime.seconds}`); - console.log(`\tnanos: ${model.createTime.nanos}`); - console.log(`Model update time:`); - console.log(`\tseconds: ${model.updateTime.seconds}`); - console.log(`\tnanos: ${model.updateTime.nanos}`); - console.log(`Model deployment state: ${model.deploymentState}`); - console.log(`\n`); - }); - }) - .catch(err => { - console.error(err); - }); + const [models] = await client.listModels({ + parent: projectLocation, + filter: filter, + }); + + // Display the model information. 
+ console.log(`List of models:`); + models.forEach(model => { + console.log(`Model name: ${model.name}`); + console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); + console.log(`Model display name: ${model.displayName}`); + console.log(`Model dataset id: ${model.datasetId}`); + if (model.modelMetadata === `translationModelMetadata`) { + console.log(`Translation model metadata:`); + console.log(`\tBase model: ${model.translationModelMetadata.baseModel}`); + console.log( + `\tSource language code: ${ + model.translationModelMetadata.sourceLanguageCode + }` + ); + console.log( + `\tTarget language code: ${ + model.translationModelMetadata.targetLanguageCode + }` + ); + } else if (model.modelMetadata === `textClassificationModelMetadata`) { + console.log( + `Text classification model metadata: ${ + model.textClassificationModelMetadata + }` + ); + } else if (model.modelMetadata === `imageClassificationModelMetadata`) { + console.log(`Image classification model metadata:`); + console.log( + `\tBase model id: ${model.imageClassificationModelMetadata.baseModelId}` + ); + console.log( + `\tTrain budget: ${model.imageClassificationModelMetadata.trainBudget}` + ); + console.log( + `\tTrain cost: ${model.imageClassificationModelMetadata.trainCost}` + ); + console.log( + `\tStop reason: ${model.imageClassificationModelMetadata.stopReason}` + ); + } + console.log(`Model create time:`); + console.log(`\tseconds: ${model.createTime.seconds}`); + console.log(`\tnanos: ${model.createTime.nanos}`); + console.log(`Model update time:`); + console.log(`\tseconds: ${model.updateTime.seconds}`); + console.log(`\tnanos: ${model.updateTime.nanos}`); + console.log(`Model deployment state: ${model.deploymentState}`); + console.log(`\n`); + }); // [END automl_natural_language_listModels] } -function getModel(projectId, computeRegion, modelId) { +async function getModel(projectId, computeRegion, modelId) { // [START automl_natural_language_getModel] const automl = require(`@google-cloud/automl`); @@ -213,71 +187,63 @@ function getModel(projectId, computeRegion, modelId) { const modelFullId = client.modelPath(projectId, computeRegion, modelId); // Get complete detail of the model. - client - .getModel({name: modelFullId}) - .then(responses => { - const model = responses[0]; - - // Display the model information. 
- console.log(`Model name: ${model.name}`); - console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); - console.log(`Model display name: ${model.displayName}`); - console.log(`Model dataset id: ${model.datasetId}`); - if (model.modelMetadata === `translationModelMetadata`) { - console.log(`Translation model metadata:`); - console.log( - `\tBase model: ${model.translationModelMetadata.baseModel}` - ); - console.log( - `\tSource language code: ${ - model.translationModelMetadata.sourceLanguageCode - }` - ); - console.log( - `\tTarget language code: ${ - model.translationModelMetadata.targetLanguageCode - }` - ); - } else if (model.modelMetadata === `textClassificationModelMetadata`) { - console.log( - `Text classification model metadata: ${ - model.textClassificationModelMetadata - }` - ); - } else if (model.modelMetadata === `imageClassificationModelMetadata`) { - console.log(`Image classification model metadata:`); - console.log( - `\tBase model id: ${ - model.imageClassificationModelMetadata.baseModelId - }` - ); - console.log( - `\tTrain budget: ${ - model.imageClassificationModelMetadata.trainBudget - }` - ); - console.log( - `\tTrain cost: ${model.imageClassificationModelMetadata.trainCost}` - ); - console.log( - `\tStop reason: ${model.imageClassificationModelMetadata.stopReason}` - ); - } - console.log(`Model create time:`); - console.log(`\tseconds: ${model.createTime.seconds}`); - console.log(`\tnanos: ${model.createTime.nanos}`); - console.log(`Model update time:`); - console.log(`\tseconds: ${model.updateTime.seconds}`); - console.log(`\tnanos: ${model.updateTime.nanos}`); - console.log(`Model deployment state: ${model.deploymentState}`); - }) - .catch(err => { - console.error(err); - }); + const [model] = await client.getModel({name: modelFullId}); + + // Display the model information. 
+ console.log(`Model name: ${model.name}`); + console.log(`Model id: ${model.name.split(`/`).pop(-1)}`); + console.log(`Model display name: ${model.displayName}`); + console.log(`Model dataset id: ${model.datasetId}`); + if (model.modelMetadata === `translationModelMetadata`) { + console.log(`Translation model metadata:`); + console.log(`\tBase model: ${model.translationModelMetadata.baseModel}`); + console.log( + `\tSource language code: ${ + model.translationModelMetadata.sourceLanguageCode + }` + ); + console.log( + `\tTarget language code: ${ + model.translationModelMetadata.targetLanguageCode + }` + ); + } else if (model.modelMetadata === `textClassificationModelMetadata`) { + console.log( + `Text classification model metadata: ${ + model.textClassificationModelMetadata + }` + ); + } else if (model.modelMetadata === `imageClassificationModelMetadata`) { + console.log(`Image classification model metadata:`); + console.log( + `\tBase model id: ${model.imageClassificationModelMetadata.baseModelId}` + ); + console.log( + `\tTrain budget: ${model.imageClassificationModelMetadata.trainBudget}` + ); + console.log( + `\tTrain cost: ${model.imageClassificationModelMetadata.trainCost}` + ); + console.log( + `\tStop reason: ${model.imageClassificationModelMetadata.stopReason}` + ); + } + console.log(`Model create time:`); + console.log(`\tseconds: ${model.createTime.seconds}`); + console.log(`\tnanos: ${model.createTime.nanos}`); + console.log(`Model update time:`); + console.log(`\tseconds: ${model.updateTime.seconds}`); + console.log(`\tnanos: ${model.updateTime.nanos}`); + console.log(`Model deployment state: ${model.deploymentState}`); // [END automl_natural_language_getModel] } -function listModelEvaluations(projectId, computeRegion, modelId, filter_) { +async function listModelEvaluations( + projectId, + computeRegion, + modelId, + filter_ +) { // [START automl_natural_language_listModelEvaluations] const automl = require(`@google-cloud/automl`); const util = require(`util`); @@ -296,22 +262,19 @@ function listModelEvaluations(projectId, computeRegion, modelId, filter_) { const modelFullId = client.modelPath(projectId, computeRegion, modelId); // List all the model evaluations in the model by applying filter. - client - .listModelEvaluations({parent: modelFullId, filter: filter_}) - .then(responses => { - const elements = responses[0]; - console.log(`List of model evaluations:`); - elements.forEach(element => { - console.log(util.inspect(element, false, null)); - }); - }) - .catch(err => { - console.error(err); - }); + const [elements] = await client.listModelEvaluations({ + parent: modelFullId, + filter: filter_, + }); + console.log(`List of model evaluations:`); + elements.forEach(element => { + console.log(util.inspect(element, false, null)); + }); + // [END automl_natural_language_listModelEvaluations] } -function getModelEvaluation( +async function getModelEvaluation( projectId, computeRegion, modelId, @@ -340,19 +303,14 @@ function getModelEvaluation( ); // Get complete detail of the model evaluation. 
- client - .getModelEvaluation({name: modelEvaluationFullId}) - .then(responses => { - const response = responses[0]; - console.log(util.inspect(response, false, null)); - }) - .catch(err => { - console.error(err); - }); + const [response] = await client.getModelEvaluation({ + name: modelEvaluationFullId, + }); + console.log(util.inspect(response, false, null)); // [END automl_natural_language_getModelEvaluation] } -function displayEvaluation(projectId, computeRegion, modelId, filter) { +async function displayEvaluation(projectId, computeRegion, modelId, filter) { // [START automl_natural_language_displayEvaluation] const automl = require(`@google-cloud/automl`); const math = require(`mathjs`); @@ -371,83 +329,72 @@ function displayEvaluation(projectId, computeRegion, modelId, filter) { const modelFullId = client.modelPath(projectId, computeRegion, modelId); // List all the model evaluations in the model by applying filter. - client - .listModelEvaluations({parent: modelFullId, filter: filter}) - .then(respond => { - const response = respond[0]; - response.forEach(element => { - // There is evaluation for each class in a model and for overall model. - // Get only the evaluation of overall model. - if (!element.annotationSpecId) { - const modelEvaluationId = element.name.split(`/`).pop(-1); - - // Resource name for the model evaluation. - const modelEvaluationFullId = client.modelEvaluationPath( - projectId, - computeRegion, - modelId, - modelEvaluationId - ); + const [response] = await client.listModelEvaluations({ + parent: modelFullId, + filter: filter, + }); + response.forEach(async element => { + // There is evaluation for each class in a model and for overall model. + // Get only the evaluation of overall model. + if (!element.annotationSpecId) { + const modelEvaluationId = element.name.split(`/`).pop(-1); + + // Resource name for the model evaluation. + const modelEvaluationFullId = client.modelEvaluationPath( + projectId, + computeRegion, + modelId, + modelEvaluationId + ); + + // Get a model evaluation. + + const [modelEvaluation] = await client.getModelEvaluation({ + name: modelEvaluationFullId, + }); + + const classMetrics = modelEvaluation.classificationEvaluationMetrics; - // Get a model evaluation. 
-          client
-            .getModelEvaluation({name: modelEvaluationFullId})
-            .then(responses => {
-              const modelEvaluation = responses[0];
-
-              const classMetrics =
-                modelEvaluation.classificationEvaluationMetrics;
-
-              const confidenceMetricsEntries =
-                classMetrics.confidenceMetricsEntry;
-
-              // Showing model score based on threshold of 0.5
-              confidenceMetricsEntries.forEach(confidenceMetricsEntry => {
-                if (confidenceMetricsEntry.confidenceThreshold === 0.5) {
-                  console.log(
-                    `Precision and recall are based on a score threshold of 0.5`
-                  );
-                  console.log(
-                    `Model Precision: `,
-                    math.round(confidenceMetricsEntry.precision * 100, 2) + `%`
-                  );
-                  console.log(
-                    `Model Recall: `,
-                    math.round(confidenceMetricsEntry.recall * 100, 2) + `%`
-                  );
-                  console.log(
-                    `Model F1 score: `,
-                    math.round(confidenceMetricsEntry.f1Score * 100, 2) + `%`
-                  );
-                  console.log(
-                    `Model Precision@1: `,
-                    math.round(confidenceMetricsEntry.precisionAt1 * 100, 2) +
-                      `%`
-                  );
-                  console.log(
-                    `Model Recall@1: `,
-                    math.round(confidenceMetricsEntry.recallAt1 * 100, 2) + `%`
-                  );
-                  console.log(
-                    `Model F1 score@1: `,
-                    math.round(confidenceMetricsEntry.f1ScoreAt1 * 100, 2) + `%`
-                  );
-                }
-              });
-            })
-            .catch(err => {
-              console.error(err);
-            });
-        }
-      });
-    })
-    .catch(err => {
-      console.error(err);
-    });
+      const confidenceMetricsEntries = classMetrics.confidenceMetricsEntry;
+
+      // Showing model score based on threshold of 0.5
+      confidenceMetricsEntries.forEach(confidenceMetricsEntry => {
+        if (confidenceMetricsEntry.confidenceThreshold === 0.5) {
+          console.log(
+            `Precision and recall are based on a score threshold of 0.5`
+          );
+          console.log(
+            `Model Precision: `,
+            math.round(confidenceMetricsEntry.precision * 100, 2) + `%`
+          );
+          console.log(
+            `Model Recall: `,
+            math.round(confidenceMetricsEntry.recall * 100, 2) + `%`
+          );
+          console.log(
+            `Model F1 score: `,
+            math.round(confidenceMetricsEntry.f1Score * 100, 2) + `%`
+          );
+          console.log(
+            `Model Precision@1: `,
+            math.round(confidenceMetricsEntry.precisionAt1 * 100, 2) + `%`
+          );
+          console.log(
+            `Model Recall@1: `,
+            math.round(confidenceMetricsEntry.recallAt1 * 100, 2) + `%`
+          );
+          console.log(
+            `Model F1 score@1: `,
+            math.round(confidenceMetricsEntry.f1ScoreAt1 * 100, 2) + `%`
+          );
+        }
+      });
+    }
+  });
   // [END automl_natural_language_displayEvaluation]
 }
 
-function deleteModel(projectId, computeRegion, modelId) {
+async function deleteModel(projectId, computeRegion, modelId) {
   // [START automl_natural_language_deleteModel]
   const automl = require(`@google-cloud/automl`);
 
@@ -464,19 +411,11 @@ function deleteModel(projectId, computeRegion, modelId) {
   const modelFullId = client.modelPath(projectId, computeRegion, modelId);
 
   // Delete a model.
-  client
-    .deleteModel({name: modelFullId})
-    .then(responses => {
-      const operation = responses[0];
-      return operation.promise();
-    })
-    .then(responses => {
-      // The final result of the operation.
-      if (responses[2].done === true) console.log(`Model deleted.`);
-    })
-    .catch(err => {
-      console.error(err);
-    });
+  const [operation] = await client.deleteModel({name: modelFullId});
+  const response = await operation.promise();
+  // The final result of the operation. 
+ if (response[2].done === true) console.log(`Model deleted.`); + // [END automl_natural_language_deleteModel] } @@ -546,53 +485,88 @@ require(`yargs`) description: `Budget for training the model`, }, }) - .command(`create-model`, `creates a new Model`, {}, opts => - createModel( - opts.projectId, - opts.computeRegion, - opts.datasetId, - opts.modelName, - opts.trainBudget - ) + .command( + `create-model`, + `creates a new Model`, + {}, + async opts => + await createModel( + opts.projectId, + opts.computeRegion, + opts.datasetId, + opts.modelName, + opts.trainBudget + ).catch(console.error) ) .command( `get-operation-status`, `Gets status of current operation`, {}, - opts => getOperationStatus(opts.operationFullId) + async opts => + await getOperationStatus(opts.operationFullId).catch(console.error) ) - .command(`list-models`, `list all Models`, {}, opts => - listModels(opts.projectId, opts.computeRegion, opts.filter) + .command( + `list-models`, + `list all Models`, + {}, + async opts => + await listModels(opts.projectId, opts.computeRegion, opts.filter).catch( + console.error + ) ) - .command(`get-model`, `Get a Model`, {}, opts => - getModel(opts.projectId, opts.computeRegion, opts.modelId) + .command( + `get-model`, + `Get a Model`, + {}, + async opts => + await getModel(opts.projectId, opts.computeRegion, opts.modelId).catch( + console.error + ) ) - .command(`list-model-evaluations`, `List model evaluations`, {}, opts => - listModelEvaluations( - opts.projectId, - opts.computeRegion, - opts.modelId, - opts.filter - ) + .command( + `list-model-evaluations`, + `List model evaluations`, + {}, + async opts => + await listModelEvaluations( + opts.projectId, + opts.computeRegion, + opts.modelId, + opts.filter + ).catch(console.error) ) - .command(`get-model-evaluation`, `Get model evaluation`, {}, opts => - getModelEvaluation( - opts.projectId, - opts.computeRegion, - opts.modelId, - opts.modelEvaluationId - ) + .command( + `get-model-evaluation`, + `Get model evaluation`, + {}, + async opts => + await getModelEvaluation( + opts.projectId, + opts.computeRegion, + opts.modelId, + opts.modelEvaluationId + ).catch(console.error) ) - .command(`display-evaluation`, `Display evaluation`, {}, opts => - displayEvaluation( - opts.projectId, - opts.computeRegion, - opts.modelId, - opts.filter - ) + .command( + `display-evaluation`, + `Display evaluation`, + {}, + async opts => + await displayEvaluation( + opts.projectId, + opts.computeRegion, + opts.modelId, + opts.filter + ).catch(console.error) ) - .command(`delete-model`, `Delete a Model`, {}, opts => - deleteModel(opts.projectId, opts.computeRegion, opts.modelId) + .command( + `delete-model`, + `Delete a Model`, + {}, + async opts => + await deleteModel(opts.projectId, opts.computeRegion, opts.modelId).catch( + console.error + ) ) .example(`node $0 create-model -i "DatasetID" -m "myModelName" -t "2"`) .example(`node $0 get-operation-status -i "datasetId" -o "OperationFullID"`) diff --git a/samples/automl/automlNaturalLanguagePredict.js b/samples/automl/automlNaturalLanguagePredict.js index bf0395e3..34887d03 100755 --- a/samples/automl/automlNaturalLanguagePredict.js +++ b/samples/automl/automlNaturalLanguagePredict.js @@ -23,7 +23,7 @@ `use strict`; -function predict(projectId, computeRegion, modelId, filePath) { +async function predict(projectId, computeRegion, modelId, filePath) { // [START automl_natural_language_predict] const automl = require(`@google-cloud/automl`); const fs = require(`fs`); @@ -55,18 +55,16 @@ function predict(projectId, 
computeRegion, modelId, filePath) {
   // Params is additional domain-specific parameters.
   // Currently there is no additional parameters supported.
-  client
-    .predict({name: modelFullId, payload: payload, params: {}})
-    .then(responses => {
-      console.log(`Prediction results:`);
-      responses[0].payload.forEach(result => {
-        console.log(`Predicted class name: ${result.displayName}`);
-        console.log(`Predicted class score: ${result.classification.score}`);
-      });
-    })
-    .catch(err => {
-      console.error(err);
-    });
+  const [response] = await client.predict({
+    name: modelFullId,
+    payload: payload,
+    params: {},
+  });
+  console.log(`Prediction results:`);
+  response.payload.forEach(result => {
+    console.log(`Predicted class name: ${result.displayName}`);
+    console.log(`Predicted class score: ${result.classification.score}`);
+  });
   // [END automl_natural_language_predict]
 }

@@ -110,14 +108,18 @@ require(`yargs`)
         `only produce results that have at least this confidence score threshold. Default is .5`,
     },
   })
-  .command(`predict`, `classify the content`, {}, opts =>
-    predict(
-      opts.projectId,
-      opts.computeRegion,
-      opts.modelId,
-      opts.filePath,
-      opts.scoreThreshold
-    )
+  .command(
+    `predict`,
+    `classify the content`,
+    {},
+    async opts =>
+      await predict(
+        opts.projectId,
+        opts.computeRegion,
+        opts.modelId,
+        opts.filePath,
+        opts.scoreThreshold
+      ).catch(console.error)
   )
   .example(`node $0 predict -i "modelId" -f "./resources/test.txt" -s "0.5"`)
   .wrap(120)
diff --git a/samples/quickstart.js b/samples/quickstart.js
index b1b5683b..eaae8c5e 100644
--- a/samples/quickstart.js
+++ b/samples/quickstart.js
@@ -16,31 +16,29 @@
 'use strict';

 // [START language_quickstart]
-// Imports the Google Cloud client library
-const language = require('@google-cloud/language');
-
-// Instantiates a client
-const client = new language.LanguageServiceClient();
-
-// The text to analyze
-const text = 'Hello, world!';
-
-const document = {
-  content: text,
-  type: 'PLAIN_TEXT',
-};
-
-// Detects the sentiment of the text
-client
-  .analyzeSentiment({document: document})
-  .then(results => {
-    const sentiment = results[0].documentSentiment;
-
-    console.log(`Text: ${text}`);
-    console.log(`Sentiment score: ${sentiment.score}`);
-    console.log(`Sentiment magnitude: ${sentiment.magnitude}`);
-  })
-  .catch(err => {
-    console.error('ERROR:', err);
-  });
+async function main() {
+  // Imports the Google Cloud client library
+  const language = require('@google-cloud/language');
+
+  // Instantiates a client
+  const client = new language.LanguageServiceClient();
+
+  // The text to analyze
+  const text = 'Hello, world!';
+
+  const document = {
+    content: text,
+    type: 'PLAIN_TEXT',
+  };
+
+  // Detects the sentiment of the text
+  const [result] = await client.analyzeSentiment({document: document});
+  const sentiment = result.documentSentiment;
+
+  console.log(`Text: ${text}`);
+  console.log(`Sentiment score: ${sentiment.score}`);
+  console.log(`Sentiment magnitude: ${sentiment.magnitude}`);
+}
+
+main().catch(console.error);
 // [END language_quickstart]
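
The first patch applies one mechanical transformation to every sample: the gax-generated client methods resolve with an array whose first element is the API response, so each `.then(responses => ...)` chain that indexed `responses[0]` becomes a single `await` with array destructuring, and error handling moves out to the caller. A minimal before/after sketch of that pattern, assuming only a client whose method resolves to a single-element array (the function names here are illustrative, not part of the samples):

    // Before: promise chain. gax resolves with an array, so the
    // response has to be picked out as responses[0] inside .then().
    function classifyBefore(client, document) {
      client
        .classifyText({document: document})
        .then(responses => {
          const classification = responses[0];
          console.log(classification.categories);
        })
        .catch(err => {
          console.error('ERROR:', err);
        });
    }

    // After: async/await. Destructuring replaces responses[0], and an
    // error now rejects the returned promise instead of being handled here.
    async function classifyAfter(client, document) {
      const [classification] = await client.classifyText({document: document});
      console.log(classification.categories);
    }

Note that after the conversion the destructured value is already the response object; indexing it again (as in `response[0].payload`) would read a property that does not exist.
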
From 90319fdfd55710bbf1808e7fab319688b605dbd1 Mon Sep 17 00:00:00 2001
From: Praveen Kumar Singh
Date: Thu, 15 Nov 2018 23:02:30 +0530
Subject: [PATCH 2/3] removed async await from yargs command

---
 samples/analyze.v1.js                        | 214 +++++++--------
 samples/analyze.v1beta2.js                   | 178 ++++++------
 .../automl/automlNaturalLanguageDataset.js   | 239 ++++++++----------
 samples/automl/automlNaturalLanguageModel.js | 253 ++++++++----------
 .../automl/automlNaturalLanguagePredict.js   | 106 ++++----
 5 files changed, 456 insertions(+), 534 deletions(-)

diff --git a/samples/analyze.v1.js b/samples/analyze.v1.js
index 5eee6647..3d567538 100644
--- a/samples/analyze.v1.js
+++ b/samples/analyze.v1.js
@@ -346,115 +346,105 @@ async function classifyTextInFile(bucketName, fileName) {
   // [END language_classify_gcs]
 }

-require(`yargs`)
-  .demand(1)
-  .command(
-    `sentiment-text <text>`,
-    `Detects sentiment of a string.`,
-    {},
-    async opts => await analyzeSentimentOfText(opts.text).catch(console.error)
-  )
-  .command(
-    `sentiment-file <bucketName> <fileName>`,
-    `Detects sentiment in a file in Google Cloud Storage.`,
-    {},
-    async opts =>
-      await analyzeSentimentInFile(opts.bucketName, opts.fileName).catch(
-        console.error
-      )
-  )
-  .command(
-    `entities-text <text>`,
-    `Detects entities in a string.`,
-    {},
-    async opts => await analyzeEntitiesOfText(opts.text).catch(console.error)
-  )
-  .command(
-    `entities-file <bucketName> <fileName>`,
-    `Detects entities in a file in Google Cloud Storage.`,
-    {},
-    async opts =>
-      await analyzeEntitiesInFile(opts.bucketName, opts.fileName).catch(
-        console.error
-      )
-  )
-  .command(`syntax-text <text>`, `Detects syntax of a string.`, {}, opts =>
-    analyzeSyntaxOfText(opts.text)
-  )
-  .command(
-    `syntax-file <bucketName> <fileName>`,
-    `Detects syntax in a file in Google Cloud Storage.`,
-    {},
-    async opts =>
-      await analyzeSyntaxInFile(opts.bucketName, opts.fileName).catch(
-        console.error
-      )
-  )
-  .command(
-    `entity-sentiment-text <text>`,
-    `Detects sentiment of the entities in a string.`,
-    {},
-    async opts =>
-      await analyzeEntitySentimentOfText(opts.text).catch(console.error)
-  )
-  .command(
-    `entity-sentiment-file <bucketName> <fileName>`,
-    `Detects sentiment of the entities in a file in Google Cloud Storage.`,
-    {},
-    async opts =>
-      await analyzeEntitySentimentInFile(opts.bucketName, opts.fileName)
-  )
-  .command(
-    `classify-text <text>`,
-    `Classifies text of a string.`,
-    {},
-    async opts => await classifyTextOfText(opts.text)
-  )
-  .command(
-    `classify-file <bucketName> <fileName>`,
-    `Classifies text in a file in Google Cloud Storage.`,
-    {},
-    async opts => await classifyTextInFile(opts.bucketName, opts.fileName)
-  )
-  .example(
-    `node $0 sentiment-text "President Obama is speaking at the White House."`
-  )
-  .example(
-    `node $0 sentiment-file my-bucket file.txt`,
-    `Detects sentiment in gs://my-bucket/file.txt`
-  )
-  .example(
-    `node $0 entities-text "President Obama is speaking at the White House."`
-  )
-  .example(
-    `node $0 entities-file my-bucket file.txt`,
-    `Detects entities in gs://my-bucket/file.txt`
-  )
-  .example(
-    `node $0 syntax-text "President Obama is speaking at the White House."`
-  )
-  .example(
-    `node $0 syntax-file my-bucket file.txt`,
-    `Detects syntax in gs://my-bucket/file.txt`
-  )
-  .example(
-    `node $0 entity-sentiment-text "President Obama is speaking at the White House."`
-  )
-  .example(
-    `node $0 entity-sentiment-file my-bucket file.txt`,
-    `Detects sentiment of entities in gs://my-bucket/file.txt`
-  )
-  .example(
-    `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."`
-  )
-  .example(
-    `node $0 classify-file my-bucket android_text.txt`,
-    `Detects syntax in gs://my-bucket/android_text.txt`
-  )
-  .wrap(120)
-  .recommendCommands()
-  .epilogue(
-    `For more information, see https://cloud.google.com/natural-language/docs`
-  )
-  .help()
-  .strict().argv;
+async function main() {
+  require(`yargs`)
+    .demand(1)
+    .command(
+      `sentiment-text <text>`,
+      `Detects sentiment of a string.`,
+      {},
+      opts => analyzeSentimentOfText(opts.text)
+    )
+    .command(
+      `sentiment-file <bucketName> <fileName>`,
+      `Detects sentiment in a file in Google Cloud Storage.`,
+      {},
+      opts => analyzeSentimentInFile(opts.bucketName, opts.fileName)
+    )
+    .command(
+      `entities-text <text>`,
+      `Detects entities in a string.`,
+      {},
+      opts => analyzeEntitiesOfText(opts.text)
+    )
+    .command(
+      `entities-file <bucketName> <fileName>`,
+      `Detects entities in a file in Google Cloud Storage.`,
+      {},
+      opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName)
+    )
+    .command(`syntax-text <text>`, `Detects syntax of a string.`, {}, opts =>
+      analyzeSyntaxOfText(opts.text)
+    )
+    .command(
+      `syntax-file <bucketName> <fileName>`,
+      `Detects syntax in a file in Google Cloud Storage.`,
+      {},
+      opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName)
+    )
+    .command(
+      `entity-sentiment-text <text>`,
+      `Detects sentiment of the entities in a string.`,
+      {},
+      opts => analyzeEntitySentimentOfText(opts.text)
+    )
+    .command(
+      `entity-sentiment-file <bucketName> <fileName>`,
+      `Detects sentiment of the entities in a file in Google Cloud Storage.`,
+      {},
+      opts => analyzeEntitySentimentInFile(opts.bucketName, opts.fileName)
+    )
+    .command(`classify-text <text>`, `Classifies text of a string.`, {}, opts =>
+      classifyTextOfText(opts.text)
+    )
+    .command(
+      `classify-file <bucketName> <fileName>`,
+      `Classifies text in a file in Google Cloud Storage.`,
+      {},
+      opts => classifyTextInFile(opts.bucketName, opts.fileName)
+    )
+    .example(
+      `node $0 sentiment-text "President Obama is speaking at the White House."`
+    )
+    .example(
+      `node $0 sentiment-file my-bucket file.txt`,
+      `Detects sentiment in gs://my-bucket/file.txt`
+    )
+    .example(
+      `node $0 entities-text "President Obama is speaking at the White House."`
+    )
+    .example(
+      `node $0 entities-file my-bucket file.txt`,
+      `Detects entities in gs://my-bucket/file.txt`
+    )
+    .example(
+      `node $0 syntax-text "President Obama is speaking at the White House."`
+    )
+    .example(
+      `node $0 syntax-file my-bucket file.txt`,
+      `Detects syntax in gs://my-bucket/file.txt`
+    )
+    .example(
+      `node $0 entity-sentiment-text "President Obama is speaking at the White House."`
+    )
+    .example(
+      `node $0 entity-sentiment-file my-bucket file.txt`,
+      `Detects sentiment of entities in gs://my-bucket/file.txt`
+    )
+    .example(
+      `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."`
+    )
+    .example(
+      `node $0 classify-file my-bucket android_text.txt`,
+      `Detects syntax in gs://my-bucket/android_text.txt`
+    )
+    .wrap(120)
+    .recommendCommands()
+    .epilogue(
+      `For more information, see https://cloud.google.com/natural-language/docs`
+    )
+    .help()
+    .strict().argv;
+}
+
+main().catch(console.error);
diff --git a/samples/analyze.v1beta2.js b/samples/analyze.v1beta2.js
index 158f6673..a6280c63 100644
--- a/samples/analyze.v1beta2.js
+++ b/samples/analyze.v1beta2.js
@@ -287,97 +287,87 @@ function classifyTextInFile(bucketName, fileName) {
   // [END language_classify_file]
 }

-require(`yargs`)
-  .demand(1)
-  .command(
-    `sentiment-text <text>`,
-    `Detects sentiment of a string.`,
-    {},
-    async opts => await analyzeSentimentOfText(opts.text).catch(console.error)
-  )
-  .command(
-    `sentiment-file <bucketName> <fileName>`,
-    `Detects sentiment in a file in Google Cloud Storage.`,
-    {},
-    async opts =>
-      await analyzeSentimentInFile(opts.bucketName, opts.fileName).catch(
-        console.error
-      )
-  )
-  .command(
-    `entities-text <text>`,
-    `Detects entities in a string.`,
-    {},
-    async opts => await analyzeEntitiesOfText(opts.text).catch(console.error)
-  )
-  .command(
-    `entities-file <bucketName> <fileName>`,
-    `Detects entities in a file in Google Cloud Storage.`,
-    {},
-    async opts =>
-      await analyzeEntitiesInFile(opts.bucketName, opts.fileName).catch(
-        console.error
-      )
-  )
-  .command(
-    `syntax-text <text>`,
-    `Detects syntax of a string.`,
-    {},
-    async opts => await analyzeSyntaxOfText(opts.text).catch(console.error)
-  )
-  .command(
-    `syntax-file <bucketName> <fileName>`,
-    `Detects syntax in a file in Google Cloud Storage.`,
-    {},
-    async opts =>
-      await analyzeSyntaxInFile(opts.bucketName, opts.fileName).catch(
-        console.error
-      )
-  )
-  .command(`classify-text <text>`, `Classifies text of a string.`, {}, opts =>
-    classifyTextOfText(opts.text)
-  )
-  .command(
-    `classify-file <bucketName> <fileName>`,
-    `Classifies text in a file in Google Cloud Storage.`,
-    {},
-    async opts =>
-      await classifyTextInFile(opts.bucketName, opts.fileName).catch(
-        console.error
-      )
-  )
-  .example(
-    `node $0 sentiment-text "President Obama is speaking at the White House."`
-  )
-  .example(
-    `node $0 sentiment-file my-bucket file.txt`,
-    `Detects sentiment in gs://my-bucket/file.txt`
-  )
-  .example(
-    `node $0 entities-text "President Obama is speaking at the White House."`
-  )
-  .example(
-    `node $0 entities-file my-bucket file.txt`,
-    `Detects entities in gs://my-bucket/file.txt`
-  )
-  .example(
-    `node $0 syntax-text "President Obama is speaking at the White House."`
-  )
-  .example(
-    `node $0 syntax-file my-bucket file.txt`,
-    `Detects syntax in gs://my-bucket/file.txt`
-  )
-  .example(
-    `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."`
-  )
-  .example(
-    `node $0 classify-file my-bucket android_text.txt`,
-    `Detects syntax in gs://my-bucket/android_text.txt`
-  )
-  .wrap(120)
-  .recommendCommands()
-  .epilogue(
-    `For more information, see https://cloud.google.com/natural-language/docs`
-  )
-  .help()
-  .strict().argv;
+function main() {
+  require(`yargs`)
+    .demand(1)
+    .command(
+      `sentiment-text <text>`,
+      `Detects sentiment of a string.`,
+      {},
+      opts => analyzeSentimentOfText(opts.text)
+    )
+    .command(
+      `sentiment-file <bucketName> <fileName>`,
+      `Detects sentiment in a file in Google Cloud Storage.`,
+      {},
+      opts => analyzeSentimentInFile(opts.bucketName, opts.fileName)
+    )
+    .command(
+      `entities-text <text>`,
+      `Detects entities in a string.`,
+      {},
+      opts => analyzeEntitiesOfText(opts.text)
+    )
+    .command(
+      `entities-file <bucketName> <fileName>`,
+      `Detects entities in a file in Google Cloud Storage.`,
+      {},
+      opts => analyzeEntitiesInFile(opts.bucketName, opts.fileName)
+    )
+    .command(`syntax-text <text>`, `Detects syntax of a string.`, {}, opts =>
+      analyzeSyntaxOfText(opts.text)
+    )
+    .command(
+      `syntax-file <bucketName> <fileName>`,
+      `Detects syntax in a file in Google Cloud Storage.`,
+      {},
+      opts => analyzeSyntaxInFile(opts.bucketName, opts.fileName)
+    )
+    .command(`classify-text <text>`, `Classifies text of a string.`, {}, opts =>
+      classifyTextOfText(opts.text)
+    )
+    .command(
+      `classify-file <bucketName> <fileName>`,
+      `Classifies text in a file in Google Cloud Storage.`,
+      {},
+      opts => classifyTextInFile(opts.bucketName, opts.fileName)
+    )
+    .example(
+      `node $0 sentiment-text "President Obama is speaking at the White House."`
+    )
+    .example(
+      `node $0 sentiment-file my-bucket file.txt`,
+      `Detects sentiment in gs://my-bucket/file.txt`
+    )
+    .example(
+      `node $0 entities-text "President Obama is speaking at the White House."`
+    )
+    .example(
+      `node $0 entities-file my-bucket file.txt`,
+      `Detects entities in gs://my-bucket/file.txt`
+    )
+    .example(
+ `node $0 syntax-text "President Obama is speaking at the White House."` + ) + .example( + `node $0 syntax-file my-bucket file.txt`, + `Detects syntax in gs://my-bucket/file.txt` + ) + .example( + `node $0 classify-text "Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets."` + ) + .example( + `node $0 classify-file my-bucket android_text.txt`, + `Detects syntax in gs://my-bucket/android_text.txt` + ) + .wrap(120) + .recommendCommands() + .epilogue( + `For more information, see https://cloud.google.com/natural-language/docs` + ) + .help() + .strict().argv; +} + +main().catch(console.error); diff --git a/samples/automl/automlNaturalLanguageDataset.js b/samples/automl/automlNaturalLanguageDataset.js index a5d0cd10..b6563d4e 100755 --- a/samples/automl/automlNaturalLanguageDataset.js +++ b/samples/automl/automlNaturalLanguageDataset.js @@ -251,141 +251,114 @@ async function deleteDataset(projectId, computeRegion, datasetId) { // [END automl_natural_language_deleteDataset] } -require(`yargs`) - .demand(1) - .options({ - computeRegion: { - alias: `c`, - type: `string`, - default: process.env.REGION_NAME, - requiresArg: true, - description: `region name e.g. "us-central1"`, - }, - datasetName: { - alias: `n`, - type: `string`, - default: `testDataSet`, - requiresArg: true, - description: `Name of the Dataset`, - }, - datasetId: { - alias: `i`, - type: `string`, - requiresArg: true, - description: `Id of the dataset`, - }, - filter: { - alias: `f`, - default: `text_classification_dataset_metadata:*`, - type: `string`, - requiresArg: false, - description: `filter expression`, - }, - multilabel: { - alias: `m`, - type: `string`, - default: false, - requiresArg: true, - description: - `Type of the classification problem, ` + - `False - MULTICLASS, True - MULTILABEL.`, - }, - outputUri: { - alias: `o`, - type: `string`, - requiresArg: true, - description: `URI (or local path) to export dataset`, - }, - path: { - alias: `p`, - type: `string`, - global: true, - default: `gs://nodejs-docs-samples-vcm/flowerTraindataMini.csv`, - requiresArg: true, - description: `URI or local path to input .csv, or array of .csv paths`, - }, - projectId: { - alias: `z`, - type: `number`, - default: process.env.GCLOUD_PROJECT, - requiresArg: true, - description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`, - }, - }) - .command( - `create-dataset`, - `creates a new Dataset`, - {}, - async opts => - await createDataset( +async function main() { + require(`yargs`) + .demand(1) + .options({ + computeRegion: { + alias: `c`, + type: `string`, + default: process.env.REGION_NAME, + requiresArg: true, + description: `region name e.g. 
"us-central1"`, + }, + datasetName: { + alias: `n`, + type: `string`, + default: `testDataSet`, + requiresArg: true, + description: `Name of the Dataset`, + }, + datasetId: { + alias: `i`, + type: `string`, + requiresArg: true, + description: `Id of the dataset`, + }, + filter: { + alias: `f`, + default: `text_classification_dataset_metadata:*`, + type: `string`, + requiresArg: false, + description: `filter expression`, + }, + multilabel: { + alias: `m`, + type: `string`, + default: false, + requiresArg: true, + description: + `Type of the classification problem, ` + + `False - MULTICLASS, True - MULTILABEL.`, + }, + outputUri: { + alias: `o`, + type: `string`, + requiresArg: true, + description: `URI (or local path) to export dataset`, + }, + path: { + alias: `p`, + type: `string`, + global: true, + default: `gs://nodejs-docs-samples-vcm/flowerTraindataMini.csv`, + requiresArg: true, + description: `URI or local path to input .csv, or array of .csv paths`, + }, + projectId: { + alias: `z`, + type: `number`, + default: process.env.GCLOUD_PROJECT, + requiresArg: true, + description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`, + }, + }) + .command(`create-dataset`, `creates a new Dataset`, {}, opts => + createDataset( opts.projectId, opts.computeRegion, opts.datasetName, opts.multilabel - ).catch(console.error) - ) - .command( - `list-datasets`, - `list all Datasets`, - {}, - async opts => - await listDatasets(opts.projectId, opts.computeRegion, opts.filter).catch( - console.error - ) - ) - .command( - `get-dataset`, - `Get a Dataset`, - {}, - async opts => - await getDataset( - opts.projectId, - opts.computeRegion, - opts.datasetId - ).catch(console.error) - ) - .command( - `delete-dataset`, - `Delete a dataset`, - {}, - async opts => - await deleteDataset(opts.projectId, opts.computeRegion, opts.datasetId) - ) - .command( - `import-data`, - `Import labeled items into dataset`, - {}, - async opts => - await importData( - opts.projectId, - opts.computeRegion, - opts.datasetId, - opts.path ) - ) - .command( - `export-data`, - `Export a dataset to a Google Cloud Storage Bucket`, - {}, - async opts => - await exportData( - opts.projectId, - opts.computeRegion, - opts.datasetId, - opts.outputUri - ) - ) - .example(`node $0 create-dataset -n "newDataSet"`) - .example(`node $0 list-datasets -f "imageClassificationDatasetMetadata:*"`) - .example(`node $0 get-dataset -i "DATASETID"`) - .example(`node $0 delete-dataset -i "DATASETID"`) - .example( - `node $0 import-data -i "dataSetId" -p "gs://myproject/mytraindata.csv"` - ) - .example( - `node $0 export-data -i "dataSetId" -o "gs://myproject/outputdestination.csv"` - ) - .wrap(120) - .recommendCommands() - .help() - .strict().argv; + ) + .command(`list-datasets`, `list all Datasets`, {}, opts => + listDatasets(opts.projectId, opts.computeRegion, opts.filter) + ) + .command(`get-dataset`, `Get a Dataset`, {}, opts => + getDataset(opts.projectId, opts.computeRegion, opts.datasetId) + ) + .command(`delete-dataset`, `Delete a dataset`, {}, opts => + deleteDataset(opts.projectId, opts.computeRegion, opts.datasetId) + ) + .command(`import-data`, `Import labeled items into dataset`, {}, opts => + importData(opts.projectId, opts.computeRegion, opts.datasetId, opts.path) + ) + .command( + `export-data`, + `Export a dataset to a Google Cloud Storage Bucket`, + {}, + opts => + exportData( + opts.projectId, + opts.computeRegion, + opts.datasetId, + opts.outputUri + ) + ) + .example(`node $0 create-dataset -n "newDataSet"`) + .example(`node $0 
list-datasets -f "imageClassificationDatasetMetadata:*"`) + .example(`node $0 get-dataset -i "DATASETID"`) + .example(`node $0 delete-dataset -i "DATASETID"`) + .example( + `node $0 import-data -i "dataSetId" -p "gs://myproject/mytraindata.csv"` + ) + .example( + `node $0 export-data -i "dataSetId" -o "gs://myproject/outputdestination.csv"` + ) + .wrap(120) + .recommendCommands() + .help() + .strict().argv; +} + +main().catch(console.error); diff --git a/samples/automl/automlNaturalLanguageModel.js b/samples/automl/automlNaturalLanguageModel.js index 6dcb175f..96492852 100755 --- a/samples/automl/automlNaturalLanguageModel.js +++ b/samples/automl/automlNaturalLanguageModel.js @@ -419,164 +419,133 @@ async function deleteModel(projectId, computeRegion, modelId) { // [END automl_natural_language_deleteModel] } -require(`yargs`) - .demand(1) - .options({ - computeRegion: { - alias: `c`, - type: `string`, - default: process.env.REGION_NAME, - requiresArg: true, - description: `region name e.g. "us-central1"`, - }, - datasetId: { - alias: `i`, - type: `string`, - requiresArg: true, - description: `Id of the dataset`, - }, - filter: { - alias: `f`, - default: ``, - type: `string`, - requiresArg: true, - description: `Name of the Dataset to search for`, - }, - modelName: { - alias: `m`, - type: `string`, - default: false, - requiresArg: true, - description: `Name of the model`, - }, - modelId: { - alias: `a`, - type: `string`, - default: ``, - requiresArg: true, - description: `Id of the model`, - }, - modelEvaluationId: { - alias: `e`, - type: `string`, - default: ``, - requiresArg: true, - description: `Id of the model evaluation`, - }, - operationFullId: { - alias: `o`, - type: `string`, - default: ``, - requiresArg: true, - description: `Full name of an operation`, - }, - projectId: { - alias: `z`, - type: `number`, - default: process.env.GCLOUD_PROJECT, - requiresArg: true, - description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`, - }, - trainBudget: { - alias: `t`, - type: `string`, - default: ``, - requiresArg: true, - description: `Budget for training the model`, - }, - }) - .command( - `create-model`, - `creates a new Model`, - {}, - async opts => - await createModel( +async function main() { + require(`yargs`) + .demand(1) + .options({ + computeRegion: { + alias: `c`, + type: `string`, + default: process.env.REGION_NAME, + requiresArg: true, + description: `region name e.g. "us-central1"`, + }, + datasetId: { + alias: `i`, + type: `string`, + requiresArg: true, + description: `Id of the dataset`, + }, + filter: { + alias: `f`, + default: ``, + type: `string`, + requiresArg: true, + description: `Name of the Dataset to search for`, + }, + modelName: { + alias: `m`, + type: `string`, + default: false, + requiresArg: true, + description: `Name of the model`, + }, + modelId: { + alias: `a`, + type: `string`, + default: ``, + requiresArg: true, + description: `Id of the model`, + }, + modelEvaluationId: { + alias: `e`, + type: `string`, + default: ``, + requiresArg: true, + description: `Id of the model evaluation`, + }, + operationFullId: { + alias: `o`, + type: `string`, + default: ``, + requiresArg: true, + description: `Full name of an operation`, + }, + projectId: { + alias: `z`, + type: `number`, + default: process.env.GCLOUD_PROJECT, + requiresArg: true, + description: `The GCLOUD_PROJECT string, e.g. 
"my-gcloud-project"`, + }, + trainBudget: { + alias: `t`, + type: `string`, + default: ``, + requiresArg: true, + description: `Budget for training the model`, + }, + }) + .command(`create-model`, `creates a new Model`, {}, opts => + createModel( opts.projectId, opts.computeRegion, opts.datasetId, opts.modelName, opts.trainBudget - ).catch(console.error) - ) - .command( - `get-operation-status`, - `Gets status of current operation`, - {}, - async opts => - await getOperationStatus(opts.operationFullId).catch(console.error) - ) - .command( - `list-models`, - `list all Models`, - {}, - async opts => - await listModels(opts.projectId, opts.computeRegion, opts.filter).catch( - console.error ) - ) - .command( - `get-model`, - `Get a Model`, - {}, - async opts => - await getModel(opts.projectId, opts.computeRegion, opts.modelId).catch( - console.error - ) - ) - .command( - `list-model-evaluations`, - `List model evaluations`, - {}, - async opts => - await listModelEvaluations( + ) + .command( + `get-operation-status`, + `Gets status of current operation`, + {}, + opts => getOperationStatus(opts.operationFullId) + ) + .command(`list-models`, `list all Models`, {}, opts => + listModels(opts.projectId, opts.computeRegion, opts.filter) + ) + .command(`get-model`, `Get a Model`, {}, opts => + getModel(opts.projectId, opts.computeRegion, opts.modelId) + ) + .command(`list-model-evaluations`, `List model evaluations`, {}, opts => + listModelEvaluations( opts.projectId, opts.computeRegion, opts.modelId, opts.filter - ).catch(console.error) - ) - .command( - `get-model-evaluation`, - `Get model evaluation`, - {}, - async opts => - await getModelEvaluation( + ) + ) + .command(`get-model-evaluation`, `Get model evaluation`, {}, opts => + getModelEvaluation( opts.projectId, opts.computeRegion, opts.modelId, opts.modelEvaluationId - ).catch(console.error) - ) - .command( - `display-evaluation`, - `Display evaluation`, - {}, - async opts => - await displayEvaluation( + ) + ) + .command(`display-evaluation`, `Display evaluation`, {}, opts => + displayEvaluation( opts.projectId, opts.computeRegion, opts.modelId, opts.filter - ).catch(console.error) - ) - .command( - `delete-model`, - `Delete a Model`, - {}, - async opts => - await deleteModel(opts.projectId, opts.computeRegion, opts.modelId).catch( - console.error ) - ) - .example(`node $0 create-model -i "DatasetID" -m "myModelName" -t "2"`) - .example(`node $0 get-operation-status -i "datasetId" -o "OperationFullID"`) - .example(`node $0 list-models -f "textClassificationModelMetadata:*"`) - .example(`node $0 get-model -a "ModelID"`) - .example(`node $0 list-model-evaluations -a "ModelID"`) - .example(`node $0 get-model-evaluation -a "ModelId" -e "ModelEvaluationID"`) - .example(`node $0 display-evaluation -a "ModelId"`) - .example(`node $0 delete-model -a "ModelID"`) - .wrap(120) - .recommendCommands() - .help() - .strict().argv; + ) + .command(`delete-model`, `Delete a Model`, {}, opts => + deleteModel(opts.projectId, opts.computeRegion, opts.modelId) + ) + .example(`node $0 create-model -i "DatasetID" -m "myModelName" -t "2"`) + .example(`node $0 get-operation-status -i "datasetId" -o "OperationFullID"`) + .example(`node $0 list-models -f "textClassificationModelMetadata:*"`) + .example(`node $0 get-model -a "ModelID"`) + .example(`node $0 list-model-evaluations -a "ModelID"`) + .example(`node $0 get-model-evaluation -a "ModelId" -e "ModelEvaluationID"`) + .example(`node $0 display-evaluation -a "ModelId"`) + .example(`node $0 delete-model -a "ModelID"`) + 
.wrap(120)
+    .recommendCommands()
+    .help()
+    .strict().argv;
+}
+
+main().catch(console.error);
diff --git a/samples/automl/automlNaturalLanguagePredict.js b/samples/automl/automlNaturalLanguagePredict.js
index 34887d03..4a6fff31 100755
--- a/samples/automl/automlNaturalLanguagePredict.js
+++ b/samples/automl/automlNaturalLanguagePredict.js
@@ -68,61 +68,61 @@ async function predict(projectId, computeRegion, modelId, filePath) {
   // [END automl_natural_language_predict]
 }

-require(`yargs`)
-  .demand(1)
-  .options({
-    computeRegion: {
-      alias: `c`,
-      type: `string`,
-      default: process.env.REGION_NAME,
-      requiresArg: true,
-      description: `region name e.g. "us-central1"`,
-    },
-    filePath: {
-      alias: `f`,
-      default: `./resources/test.txt`,
-      type: `string`,
-      requiresArg: true,
-      description: `local text file path of the content to be classified`,
-    },
-    modelId: {
-      alias: `i`,
-      type: `string`,
-      requiresArg: true,
-      description: `Id of the model which will be used for text classification`,
-    },
-    projectId: {
-      alias: `z`,
-      type: `number`,
-      default: process.env.GCLOUD_PROJECT,
-      requiresArg: true,
-      description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`,
-    },
-    scoreThreshold: {
-      alias: `s`,
-      type: `string`,
-      default: `0.5`,
-      requiresArg: true,
-      description:
-        `A value from 0.0 to 1.0. When the model makes predictions for an image it will` +
-        `only produce results that have at least this confidence score threshold. Default is .5`,
-    },
-  })
-  .command(
-    `predict`,
-    `classify the content`,
-    {},
-    async opts =>
-      await predict(
+async function main() {
+  require(`yargs`)
+    .demand(1)
+    .options({
+      computeRegion: {
+        alias: `c`,
+        type: `string`,
+        default: process.env.REGION_NAME,
+        requiresArg: true,
+        description: `region name e.g. "us-central1"`,
+      },
+      filePath: {
+        alias: `f`,
+        default: `./resources/test.txt`,
+        type: `string`,
+        requiresArg: true,
+        description: `local text file path of the content to be classified`,
+      },
+      modelId: {
+        alias: `i`,
+        type: `string`,
+        requiresArg: true,
+        description: `Id of the model which will be used for text classification`,
+      },
+      projectId: {
+        alias: `z`,
+        type: `number`,
+        default: process.env.GCLOUD_PROJECT,
+        requiresArg: true,
+        description: `The GCLOUD_PROJECT string, e.g. "my-gcloud-project"`,
+      },
+      scoreThreshold: {
+        alias: `s`,
+        type: `string`,
+        default: `0.5`,
+        requiresArg: true,
+        description:
+          `A value from 0.0 to 1.0. When the model makes predictions for a text snippet it will ` +
+          `only produce results that have at least this confidence score threshold. 
Default is .5`,
+      },
+    })
+    .command(`predict`, `classify the content`, {}, opts =>
+      predict(
         opts.projectId,
         opts.computeRegion,
         opts.modelId,
         opts.filePath,
         opts.scoreThreshold
-      ).catch(console.error)
-  )
-  .example(`node $0 predict -i "modelId" -f "./resources/test.txt" -s "0.5"`)
-  .wrap(120)
-  .recommendCommands()
-  .help()
-  .strict().argv;
+      )
+    )
+    .example(`node $0 predict -i "modelId" -f "./resources/test.txt" -s "0.5"`)
+    .wrap(120)
+    .recommendCommands()
+    .help()
+    .strict().argv;
+}
+
+main().catch(console.error);
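
The second patch settles on a simpler division of labor: each yargs handler is a plain arrow function that just returns the promise from the sample function, the whole CLI setup moves into a main() wrapper, and a single catch sits at the call site. A condensed sketch of that shape, reusing only the yargs calls already shown above (the single command shown is illustrative):

    async function main() {
      require(`yargs`)
        .demand(1)
        // The handler simply returns the promise; nothing awaits it here.
        .command(`get-model`, `Get a Model`, {}, opts =>
          getModel(opts.projectId, opts.computeRegion, opts.modelId)
        )
        .help()
        .strict().argv;
    }

    main().catch(console.error);

One consequence of this shape is worth keeping in mind: `.argv` dispatches the handler synchronously and does not await the promise it returns, so `main().catch()` only surfaces errors thrown while the CLI is being wired up; a rejection inside a handler surfaces as an unhandled promise rejection instead.
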
From 6172a93e326e3da6bd0fc6aa73dd7df37832f4c7 Mon Sep 17 00:00:00 2001
From: Praveen Kumar Singh
Date: Fri, 16 Nov 2018 13:44:24 +0530
Subject: [PATCH 3/3] implement changes requested in PR review

---
 samples/analyze.v1.js      | 20 ++++++++++----------
 samples/analyze.v1beta2.js | 18 +++++++++---------
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/samples/analyze.v1.js b/samples/analyze.v1.js
index 3d567538..f506f78d 100644
--- a/samples/analyze.v1.js
+++ b/samples/analyze.v1.js
@@ -35,7 +35,7 @@ async function analyzeSentimentOfText(text) {
   };

   // Detects the sentiment of the document
-  const [result] = await client.analyzeSentiment({document: document});
+  const [result] = await client.analyzeSentiment({document});

   const sentiment = result.documentSentiment;
   console.log(`Document sentiment:`);
@@ -73,7 +73,7 @@ async function analyzeSentimentInFile(bucketName, fileName) {
   };

   // Detects the sentiment of the document
-  const [result] = await client.analyzeSentiment({document: document});
+  const [result] = await client.analyzeSentiment({document});

   const sentiment = result.documentSentiment;
   console.log(`Document sentiment:`);
@@ -109,7 +109,7 @@ async function analyzeEntitiesOfText(text) {
   };

   // Detects entities in the document
-  const [result] = await client.analyzeEntities({document: document});
+  const [result] = await client.analyzeEntities({document});

   const entities = result.entities;

@@ -145,7 +145,7 @@ async function analyzeEntitiesInFile(bucketName, fileName) {
   };

   // Detects entities in the document
-  const [result] = await client.analyzeEntities({document: document});
+  const [result] = await client.analyzeEntities({document});

   const entities = result.entities;
   console.log('Entities:');
@@ -180,7 +180,7 @@ async function analyzeSyntaxOfText(text) {
   };

   // Detects syntax in the document
-  const [syntax] = await client.analyzeSyntax({document: document});
+  const [syntax] = await client.analyzeSyntax({document});

   console.log('Tokens:');
   syntax.tokens.forEach(part => {
@@ -211,7 +211,7 @@ async function analyzeSyntaxInFile(bucketName, fileName) {
   };

   // Detects syntax in the document
-  const [syntax] = await client.analyzeSyntax({document: document});
+  const [syntax] = await client.analyzeSyntax({document});

   console.log('Parts of speech:');
   syntax.tokens.forEach(part => {
@@ -241,7 +241,7 @@ async function analyzeEntitySentimentOfText(text) {
   };

   // Detects sentiment of entities in the document
-  const [result] = client.analyzeEntitySentiment({document: document});
+  const [result] = await client.analyzeEntitySentiment({document});

   const entities = result.entities;
   console.log(`Entities and sentiments:`);
@@ -275,7 +275,7 @@ async function analyzeEntitySentimentInFile(bucketName, fileName) {
   };

   // Detects sentiment of entities in the document
-  const [result] = client.analyzeEntitySentiment({document: document});
+  const [result] = await client.analyzeEntitySentiment({document});

   const entities = result.entities;
   console.log(`Entities and sentiments:`);
@@ -308,7 +308,7 @@ async function classifyTextOfText(text) {
   };

   // Classifies text in the document
-  const [classification] = await client.classifyText({document: document});
+  const [classification] = await client.classifyText({document});
   console.log('Categories:');
   classification.categories.forEach(category => {
     console.log(`Name: ${category.name}, Confidence: ${category.confidence}`);
   });
@@ -337,7 +337,7 @@ async function classifyTextInFile(bucketName, fileName) {
   };

   // Classifies text in the document
-  const [classification] = await client.classifyText({document: document});
+  const [classification] = await client.classifyText({document});

   console.log('Categories:');
   classification.categories.forEach(category => {
diff --git a/samples/analyze.v1beta2.js b/samples/analyze.v1beta2.js
index a6280c63..c393a165 100644
--- a/samples/analyze.v1beta2.js
+++ b/samples/analyze.v1beta2.js
@@ -35,7 +35,7 @@ async function analyzeSentimentOfText(text) {
   };

   // Detects the sentiment of the document
-  const [result] = await client.analyzeSentiment({document: document});
+  const [result] = await client.analyzeSentiment({document});
   const sentiment = result.documentSentiment;
   console.log(`Document sentiment:`);
   console.log(`  Score: ${sentiment.score}`);
@@ -71,7 +71,7 @@ async function analyzeSentimentInFile(bucketName, fileName) {
   };

   // Detects the sentiment of the document
-  const [result] = await client.analyzeSentiment({document: document});
+  const [result] = await client.analyzeSentiment({document});
   const sentiment = result.documentSentiment;
   console.log(`Document sentiment:`);
   console.log(`  Score: ${sentiment.score}`);
@@ -107,7 +107,7 @@ async function analyzeEntitiesOfText(text) {
   };

   // Detects entities in the document
-  const [result] = await client.analyzeEntities({document: document});
+  const [result] = await client.analyzeEntities({document});

   const entities = result.entities;
   console.log('Entities:');
@@ -143,7 +143,7 @@ async function analyzeEntitiesInFile(bucketName, fileName) {
   };

   // Detects entities in the document
-  const [result] = await client.analyzeEntities({document: document});
+  const [result] = await client.analyzeEntities({document});

   const entities = result.entities;
   console.log('Entities:');
@@ -178,7 +178,7 @@ async function analyzeSyntaxOfText(text) {
   };

   // Detects syntax in the document
-  const [syntax] = await client.analyzeSyntax({document: document});
+  const [syntax] = await client.analyzeSyntax({document});

   console.log('Parts of speech:');
   syntax.tokens.forEach(part => {
@@ -210,7 +210,7 @@ async function analyzeSyntaxInFile(bucketName, fileName) {
   };

   // Detects syntax in the document
-  const [syntax] = await client.analyzeSyntax({document: document});
+  const [syntax] = await client.analyzeSyntax({document});

   console.log('Parts of speech:');
   syntax.tokens.forEach(part => {
@@ -240,7 +240,7 @@ async function classifyTextOfText(text) {
   };

   // Classifies text in the document
-  const [classification] = await client.classifyText({document: document});
+  const [classification] = await client.classifyText({document});
   console.log('Categories:');
   classification.categories.forEach(category => {
     console.log(`Name: ${category.name}, Confidence: ${category.confidence}`);
   });
@@ -270,7 +270,7 @@ function classifyTextInFile(bucketName, fileName) {

   // Classifies text in the document
   client
-    .classifyText({document: document})
+    .classifyText({document})
     .then(results => {
       const classification = results[0];

@@ -287,7 +287,7 @@ function classifyTextInFile(bucketName, 
fileName) { // [END language_classify_file] } -function main() { +async function main() { require(`yargs`) .demand(1) .command(