Merge pull request #2858 from GoogleCloudPlatform/nodejs-vision-migration

chore(vision): migrate code from googleapis/nodejs-vision
telpirion authored Nov 22, 2022
2 parents a212c83 + 2195c48 commit c5118dd
Showing 75 changed files with 4,876 additions and 0 deletions.
68 changes: 68 additions & 0 deletions .github/workflows/vision-productSearch.yaml
@@ -0,0 +1,68 @@
name: vision-productSearch
on:
push:
branches:
- main
paths:
- 'vision/productSearch/**'
pull_request:
paths:
- 'vision/productSearch/**'
pull_request_target:
types: [labeled]
paths:
- 'vision/productSearch/**'
schedule:
- cron: '0 0 * * 0'
jobs:
test:
if: ${{ github.event.action != 'labeled' || github.event.label.name == 'actions:force-run' }}
runs-on: ubuntu-latest
timeout-minutes: 60
permissions:
contents: 'write'
pull-requests: 'write'
id-token: 'write'
steps:
- uses: actions/checkout@v3.1.0
with:
ref: ${{github.event.pull_request.head.sha}}
- uses: 'google-github-actions/auth@v1.0.0'
with:
workload_identity_provider: 'projects/1046198160504/locations/global/workloadIdentityPools/github-actions-pool/providers/github-actions-provider'
service_account: 'kokoro-system-test@long-door-651.iam.gserviceaccount.com'
create_credentials_file: 'true'
access_token_lifetime: 600s
- uses: actions/setup-node@v3.5.1
with:
node-version: 16
- run: npm install
working-directory: vision/productSearch
- run: npm test
working-directory: vision/productSearch
env:
MOCHA_REPORTER_SUITENAME: vision_productSearch
MOCHA_REPORTER_OUTPUT: vision_productSearch_sponge_log.xml
MOCHA_REPORTER: xunit
- if: ${{ github.event.action == 'labeled' && github.event.label.name == 'actions:force-run' }}
uses: actions/github-script@v6
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
try {
await github.rest.issues.removeLabel({
name: 'actions:force-run',
owner: 'GoogleCloudPlatform',
repo: 'nodejs-docs-samples',
issue_number: context.payload.pull_request.number
});
} catch (e) {
if (!e.message.includes('Label does not exist')) {
throw e;
}
}
- if: ${{ github.event_name == 'schedule'}}
run: |
curl https://github.com/googleapis/repo-automation-bots/releases/download/flakybot-1.1.0/flakybot -o flakybot -s -L
chmod +x ./flakybot
./flakybot --repo GoogleCloudPlatform/nodejs-docs-samples --commit_hash ${{github.sha}} --build_url https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}
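
Note: the pull_request_target job above runs only when the 'actions:force-run' label is applied, and the github-script step removes that label once a forced run starts. As a rough sketch (assuming the GitHub CLI is installed and <PR_NUMBER> is a placeholder for a real pull request number), a maintainer could trigger a forced run with:

  gh pr edit <PR_NUMBER> --repo GoogleCloudPlatform/nodejs-docs-samples --add-label "actions:force-run"
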
69 changes: 69 additions & 0 deletions .github/workflows/vision.yaml
@@ -0,0 +1,69 @@
name: vision
on:
push:
branches:
- main
paths:
- 'vision/**'
pull_request:
paths:
- 'vision/**'
- '!vision/productSearch/**'
pull_request_target:
types: [labeled]
paths:
- 'vision/**'
schedule:
- cron: '0 0 * * 0'
jobs:
test:
if: ${{ github.event.action != 'labeled' || github.event.label.name == 'actions:force-run' }}
runs-on: ubuntu-latest
timeout-minutes: 60
permissions:
contents: 'write'
pull-requests: 'write'
id-token: 'write'
steps:
- uses: actions/checkout@v3.1.0
with:
ref: ${{github.event.pull_request.head.sha}}
- uses: 'google-github-actions/auth@v1.0.0'
with:
workload_identity_provider: 'projects/1046198160504/locations/global/workloadIdentityPools/github-actions-pool/providers/github-actions-provider'
service_account: 'kokoro-system-test@long-door-651.iam.gserviceaccount.com'
create_credentials_file: 'true'
access_token_lifetime: 600s
- uses: actions/setup-node@v3.5.1
with:
node-version: 16
- run: npm install
working-directory: vision
- run: npm test
working-directory: vision
env:
MOCHA_REPORTER_SUITENAME: vision
MOCHA_REPORTER_OUTPUT: vision_sponge_log.xml
MOCHA_REPORTER: xunit
- if: ${{ github.event.action == 'labeled' && github.event.label.name == 'actions:force-run' }}
uses: actions/github-script@v6
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
try {
await github.rest.issues.removeLabel({
name: 'actions:force-run',
owner: 'GoogleCloudPlatform',
repo: 'nodejs-docs-samples',
issue_number: context.payload.pull_request.number
});
} catch (e) {
if (!e.message.includes('Label does not exist')) {
throw e;
}
}
- if: ${{ github.event_name == 'schedule'}}
run: |
curl https://github.com/googleapis/repo-automation-bots/releases/download/flakybot-1.1.0/flakybot -o flakybot -s -L
chmod +x ./flakybot
./flakybot --repo GoogleCloudPlatform/nodejs-docs-samples --commit_hash ${{github.sha}} --build_url https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}
2 changes: 2 additions & 0 deletions .github/workflows/workflows.json
@@ -70,6 +70,8 @@
"texttospeech",
"translate",
"video-intelligence",
"vision",
"vision/productSearch",
"contact-center-insights",
"workflows"
]
4 changes: 4 additions & 0 deletions vision/.eslintrc.yml
@@ -0,0 +1,4 @@
---
rules:
no-console: off
node/no-unsupported-features/node-builtins: off
88 changes: 88 additions & 0 deletions vision/async-batch-annotate-images.js
@@ -0,0 +1,88 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

'use strict';

function main(
inputImageUri = 'gs://cloud-samples-data/vision/label/wakeupcat.jpg',
outputUri = 'gs://YOUR_BUCKET_ID/path/to/save/results/'
) {
// [START vision_async_batch_annotate_images]
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
// const inputImageUri = 'gs://cloud-samples-data/vision/label/wakeupcat.jpg';
// const outputUri = 'gs://YOUR_BUCKET_ID/path/to/save/results/';

// Imports the Google Cloud client libraries
const {ImageAnnotatorClient} = require('@google-cloud/vision').v1;

// Instantiates a client
const client = new ImageAnnotatorClient();

  // You can send multiple images to be annotated; this sample demonstrates how to do so with
  // a single image. To annotate multiple images, create a request object for each image you want annotated.
async function asyncBatchAnnotateImages() {
// Set the type of annotation you want to perform on the image
// https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.Feature.Type
const features = [{type: 'LABEL_DETECTION'}];

// Build the image request object for that one image. Note: for additional images you have to create
// additional image request objects and store them in a list to be used below.
const imageRequest = {
image: {
source: {
imageUri: inputImageUri,
},
},
features: features,
};

// Set where to store the results for the images that will be annotated.
const outputConfig = {
gcsDestination: {
uri: outputUri,
},
batchSize: 2, // The max number of responses to output in each JSON file
};

// Add each image request object to the batch request and add the output config.
const request = {
requests: [
imageRequest, // add additional request objects here
],
outputConfig,
};

// Make the asynchronous batch request.
const [operation] = await client.asyncBatchAnnotateImages(request);

// Wait for the operation to complete
const [filesResponse] = await operation.promise();

    // The output is written to GCS with the provided outputUri as a prefix
const destinationUri = filesResponse.outputConfig.gcsDestination.uri;
console.log(`Output written to GCS with prefix: ${destinationUri}`);
}

asyncBatchAnnotateImages();
// [END vision_async_batch_annotate_images]
}

process.on('unhandledRejection', err => {
console.error(err.message);
process.exitCode = 1;
});

main(...process.argv.slice(2));
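
Because main(...process.argv.slice(2)) passes command-line arguments straight through, the sample can be run directly once dependencies are installed (npm install in vision/) and Application Default Credentials are configured. A minimal usage sketch, where the output bucket name is a placeholder you would replace with a bucket you own:

  node vision/async-batch-annotate-images.js \
    gs://cloud-samples-data/vision/label/wakeupcat.jpg \
    gs://my-example-bucket/vision-output/

The annotation results are written as JSON files under that output prefix, with up to batchSize (here, 2) responses per file.
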
108 changes: 108 additions & 0 deletions vision/batch-annotate-files-gcs.js
@@ -0,0 +1,108 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

'use strict';

function main(
gcsSourceUri = 'gs://cloud-samples-data/vision/document_understanding/kafka.pdf'
) {
// [START vision_batch_annotate_files_gcs]
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
// const gcsSourceUri = 'gs://cloud-samples-data/vision/document_understanding/kafka.pdf';

// Imports the Google Cloud client libraries
const {ImageAnnotatorClient} = require('@google-cloud/vision').v1;

// Instantiates a client
const client = new ImageAnnotatorClient();

  // You can send multiple files to be annotated; this sample demonstrates how to do so with
  // a single file. To annotate multiple files, create a request object for each file you want annotated.
async function batchAnnotateFiles() {
    // First, specify the input config with the file's URI and its MIME type.
// Supported mime_type: application/pdf, image/tiff, image/gif
// https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#inputconfig
const inputConfig = {
mimeType: 'application/pdf',
gcsSource: {
uri: gcsSourceUri,
},
};

// Set the type of annotation you want to perform on the file
// https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.Feature.Type
const features = [{type: 'DOCUMENT_TEXT_DETECTION'}];

// Build the request object for that one file. Note: for additional files you have to create
// additional file request objects and store them in a list to be used below.
// Since we are sending a file of type `application/pdf`, we can use the `pages` field to
// specify which pages to process. The service can process up to 5 pages per document file.
// https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.AnnotateFileRequest
const fileRequest = {
inputConfig: inputConfig,
features: features,
// Annotate the first two pages and the last one (max 5 pages)
      // The first page starts at 1, not 0. The last page is -1.
pages: [1, 2, -1],
};

// Add each `AnnotateFileRequest` object to the batch request.
const request = {
requests: [fileRequest],
};

// Make the synchronous batch request.
const [result] = await client.batchAnnotateFiles(request);

    // Process the results. Use just the first result, since only one file was sent in this
    // sample.
const responses = result.responses[0].responses;

for (const response of responses) {
console.log(`Full text: ${response.fullTextAnnotation.text}`);
for (const page of response.fullTextAnnotation.pages) {
for (const block of page.blocks) {
console.log(`Block confidence: ${block.confidence}`);
for (const paragraph of block.paragraphs) {
console.log(` Paragraph confidence: ${paragraph.confidence}`);
for (const word of paragraph.words) {
const symbol_texts = word.symbols.map(symbol => symbol.text);
const word_text = symbol_texts.join('');
console.log(
` Word text: ${word_text} (confidence: ${word.confidence})`
);
for (const symbol of word.symbols) {
console.log(
` Symbol: ${symbol.text} (confidence: ${symbol.confidence})`
);
}
}
}
}
}
}
}

batchAnnotateFiles();
// [END vision_batch_annotate_files_gcs]
}

process.on('unhandledRejection', err => {
console.error(err.message);
process.exitCode = 1;
});

main(...process.argv.slice(2));
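
As with the previous sample, the single command-line argument is forwarded to main(). A minimal usage sketch, using the default public sample PDF:

  node vision/batch-annotate-files-gcs.js \
    gs://cloud-samples-data/vision/document_understanding/kafka.pdf

Because pages is set to [1, 2, -1], the printed text and confidence values cover the first two pages and the last page of the document.
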