From e6059fdf943036f3262bfa1061c82e0296b5415b Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 13 Dec 2022 01:11:01 +0000 Subject: [PATCH 1/2] feat: added node groups API protos PiperOrigin-RevId: 494840237 Source-Link: https://github.com/googleapis/googleapis/commit/28449ecb37897d665a5a2c6d916133b1863f0966 Source-Link: https://github.com/googleapis/googleapis-gen/commit/786b1889c07c4ead16093d375c59455a0576ee25 Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWNsb3VkLWRhdGFwcm9jLy5Pd2xCb3QueWFtbCIsImgiOiI3ODZiMTg4OWMwN2M0ZWFkMTYwOTNkMzc1YzU5NDU1YTA1NzZlZTI1In0= --- .../google-cloud-dataproc/v1/.eslintignore | 7 + .../google-cloud-dataproc/v1/.eslintrc.json | 3 + .../google-cloud-dataproc/v1/.gitignore | 14 + .../google-cloud-dataproc/v1/.jsdoc.js | 55 + .../google-cloud-dataproc/v1/.mocharc.js | 33 + .../google-cloud-dataproc/v1/.prettierrc.js | 22 + .../google-cloud-dataproc/v1/README.md | 1 + .../v1/linkinator.config.json | 16 + .../google-cloud-dataproc/v1/package.json | 69 + .../dataproc/v1/autoscaling_policies.proto | 366 +++ .../google/cloud/dataproc/v1/batches.proto | 372 +++ .../google/cloud/dataproc/v1/clusters.proto | 1473 ++++++++++++ .../google/cloud/dataproc/v1/jobs.proto | 951 ++++++++ .../cloud/dataproc/v1/node_groups.proto | 174 ++ .../google/cloud/dataproc/v1/operations.proto | 166 ++ .../google/cloud/dataproc/v1/shared.proto | 341 +++ .../dataproc/v1/workflow_templates.proto | 807 +++++++ ...olicy_service.create_autoscaling_policy.js | 73 + ...olicy_service.delete_autoscaling_policy.js | 68 + ...g_policy_service.get_autoscaling_policy.js | 68 + ...olicy_service.list_autoscaling_policies.js | 80 + ...olicy_service.update_autoscaling_policy.js | 61 + .../v1/batch_controller.create_batch.js | 86 + .../v1/batch_controller.delete_batch.js | 61 + .../v1/batch_controller.get_batch.js | 61 + .../v1/batch_controller.list_batches.js | 74 + .../v1/cluster_controller.create_cluster.js | 90 + .../v1/cluster_controller.delete_cluster.js | 91 + 
.../v1/cluster_controller.diagnose_cluster.js | 73 + .../v1/cluster_controller.get_cluster.js | 72 + .../v1/cluster_controller.list_clusters.js | 95 + .../v1/cluster_controller.start_cluster.js | 91 + .../v1/cluster_controller.stop_cluster.js | 91 + .../v1/cluster_controller.update_cluster.js | 154 ++ .../generated/v1/job_controller.cancel_job.js | 72 + .../generated/v1/job_controller.delete_job.js | 72 + .../generated/v1/job_controller.get_job.js | 72 + .../generated/v1/job_controller.list_jobs.js | 102 + .../generated/v1/job_controller.submit_job.js | 85 + .../job_controller.submit_job_as_operation.js | 86 + .../generated/v1/job_controller.update_job.js | 87 + ...node_group_controller.create_node_group.js | 88 + .../node_group_controller.get_node_group.js | 63 + ...node_group_controller.resize_node_group.js | 98 + ...pet_metadata.google.cloud.dataproc.v1.json | 1679 ++++++++++++++ ...mplate_service.create_workflow_template.js | 73 + ...mplate_service.delete_workflow_template.js | 74 + ..._template_service.get_workflow_template.js | 74 + ...ce.instantiate_inline_workflow_template.js | 84 + ...e_service.instantiate_workflow_template.js | 92 + ...emplate_service.list_workflow_templates.js | 79 + ...mplate_service.update_workflow_template.js | 62 + .../google-cloud-dataproc/v1/src/index.ts | 35 + .../v1/autoscaling_policy_service_client.ts | 1240 ++++++++++ ...oscaling_policy_service_client_config.json | 51 + ...autoscaling_policy_service_proto_list.json | 10 + .../v1/src/v1/batch_controller_client.ts | 1183 ++++++++++ .../v1/batch_controller_client_config.json | 42 + .../src/v1/batch_controller_proto_list.json | 10 + .../v1/src/v1/cluster_controller_client.ts | 1840 +++++++++++++++ .../v1/cluster_controller_client_config.json | 72 + .../src/v1/cluster_controller_proto_list.json | 10 + .../v1/src/v1/gapic_metadata.json | 453 ++++ .../google-cloud-dataproc/v1/src/v1/index.ts | 24 + .../v1/src/v1/job_controller_client.ts | 1465 ++++++++++++ 
.../src/v1/job_controller_client_config.json | 69 + .../v1/src/v1/job_controller_proto_list.json | 10 + .../v1/src/v1/node_group_controller_client.ts | 1113 +++++++++ .../node_group_controller_client_config.json | 38 + .../v1/node_group_controller_proto_list.json | 10 + .../v1/workflow_template_service_client.ts | 1561 +++++++++++++ ...rkflow_template_service_client_config.json | 69 + .../workflow_template_service_proto_list.json | 10 + .../system-test/fixtures/sample/src/index.js | 32 + .../system-test/fixtures/sample/src/index.ts | 62 + .../v1/system-test/install.ts | 49 + .../gapic_autoscaling_policy_service_v1.ts | 1233 ++++++++++ .../v1/test/gapic_batch_controller_v1.ts | 1183 ++++++++++ .../v1/test/gapic_cluster_controller_v1.ts | 2000 +++++++++++++++++ .../v1/test/gapic_job_controller_v1.ts | 1580 +++++++++++++ .../v1/test/gapic_node_group_controller_v1.ts | 983 ++++++++ .../gapic_workflow_template_service_v1.ts | 1557 +++++++++++++ .../google-cloud-dataproc/v1/tsconfig.json | 19 + .../v1/webpack.config.js | 64 + 84 files changed, 27378 insertions(+) create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.eslintignore create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.eslintrc.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.gitignore create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.jsdoc.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.mocharc.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.prettierrc.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/README.md create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/linkinator.config.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/package.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/autoscaling_policies.proto create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/batches.proto create mode 100644 
owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/clusters.proto create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/jobs.proto create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/node_groups.proto create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/operations.proto create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/shared.proto create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/workflow_templates.proto create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.create_autoscaling_policy.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.delete_autoscaling_policy.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.get_autoscaling_policy.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.list_autoscaling_policies.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.update_autoscaling_policy.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.create_batch.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.delete_batch.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.get_batch.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.list_batches.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.create_cluster.js create mode 100644 
owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.delete_cluster.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.diagnose_cluster.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.get_cluster.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.list_clusters.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.start_cluster.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.stop_cluster.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.update_cluster.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.cancel_job.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.delete_job.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.get_job.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.list_jobs.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job_as_operation.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.update_job.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.create_node_group.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.get_node_group.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.resize_node_group.js create mode 100644 
owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.create_workflow_template.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.delete_workflow_template.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.get_workflow_template.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_workflow_template.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.list_workflow_templates.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.update_workflow_template.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/index.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client_config.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_proto_list.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client_config.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_proto_list.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client_config.json create mode 100644 
owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_proto_list.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/gapic_metadata.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/index.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client_config.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_proto_list.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client_config.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_proto_list.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client_config.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_proto_list.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.js create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/system-test/install.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_autoscaling_policy_service_v1.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_batch_controller_v1.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_cluster_controller_v1.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_job_controller_v1.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_node_group_controller_v1.ts create mode 100644 
owl-bot-staging/google-cloud-dataproc/v1/test/gapic_workflow_template_service_v1.ts create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/tsconfig.json create mode 100644 owl-bot-staging/google-cloud-dataproc/v1/webpack.config.js diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.eslintignore b/owl-bot-staging/google-cloud-dataproc/v1/.eslintignore new file mode 100644 index 00000000000..cfc348ec4d1 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/.eslintignore @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.eslintrc.json b/owl-bot-staging/google-cloud-dataproc/v1/.eslintrc.json new file mode 100644 index 00000000000..78215349546 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.gitignore b/owl-bot-staging/google-cloud-dataproc/v1/.gitignore new file mode 100644 index 00000000000..5d32b23782f --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.jsdoc.js b/owl-bot-staging/google-cloud-dataproc/v1/.jsdoc.js new file mode 100644 index 00000000000..2fa0c39341c --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/.jsdoc.js @@ -0,0 +1,55 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2022 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/dataproc', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.mocharc.js b/owl-bot-staging/google-cloud-dataproc/v1/.mocharc.js new file mode 100644 index 00000000000..481c522b00f --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/.mocharc.js @@ -0,0 +1,33 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.prettierrc.js b/owl-bot-staging/google-cloud-dataproc/v1/.prettierrc.js new file mode 100644 index 00000000000..494e147865d --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/.prettierrc.js @@ -0,0 +1,22 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/README.md b/owl-bot-staging/google-cloud-dataproc/v1/README.md new file mode 100644 index 00000000000..3e8aa9dfae5 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/README.md @@ -0,0 +1 @@ +Dataproc: Nodejs Client diff --git a/owl-bot-staging/google-cloud-dataproc/v1/linkinator.config.json b/owl-bot-staging/google-cloud-dataproc/v1/linkinator.config.json new file mode 100644 index 00000000000..befd23c8633 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/linkinator.config.json @@ -0,0 +1,16 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" + ], + "silent": true, + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/package.json b/owl-bot-staging/google-cloud-dataproc/v1/package.json new file mode 100644 index 00000000000..058a2a5d9ec --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/package.json @@ -0,0 +1,69 @@ +{ + "name": "@google-cloud/dataproc", + "version": "0.1.0", + "description": "Dataproc client for Node.js", + "repository": "googleapis/nodejs-dataproc", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google dataproc", + "dataproc", + "autoscaling policy service", + "batch controller", + "cluster 
controller", + "job controller", + "node group controller", + "workflow template service" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . && cp -r protos build/ && minifyProtoJson", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "predocs-test": "npm run docs", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^3.5.2" + }, + "devDependencies": { + "@types/mocha": "^9.1.1", + "@types/node": "^16.11.62", + "@types/sinon": "^10.0.13", + "c8": "^7.12.0", + "gts": "^3.1.1", + "jsdoc": "^3.6.11", + "jsdoc-fresh": "^2.0.1", + "jsdoc-region-tag": "^2.0.1", + "linkinator": "^4.0.3", + "mocha": "^10.0.0", + "null-loader": "^4.0.1", + "pack-n-play": "^1.0.0-2", + "sinon": "^14.0.0", + "ts-loader": "^8.4.0", + "typescript": "^4.8.3", + "webpack": "^4.46.0", + "webpack-cli": "^4.10.0" + }, + "engines": { + "node": ">=v12" + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/autoscaling_policies.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/autoscaling_policies.proto new file mode 100644 index 00000000000..18b2f7df36d --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/autoscaling_policies.proto @@ -0,0 +1,366 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.dataproc.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; +option java_multiple_files = true; +option java_outer_classname = "AutoscalingPoliciesProto"; +option java_package = "com.google.cloud.dataproc.v1"; +option (google.api.resource_definition) = { + type: "dataproc.googleapis.com/Region" + pattern: "projects/{project}/regions/{region}" +}; + +// The API interface for managing autoscaling policies in the +// Dataproc API. +service AutoscalingPolicyService { + option (google.api.default_host) = "dataproc.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Creates new autoscaling policy. + rpc CreateAutoscalingPolicy(CreateAutoscalingPolicyRequest) returns (AutoscalingPolicy) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/locations/*}/autoscalingPolicies" + body: "policy" + additional_bindings { + post: "/v1/{parent=projects/*/regions/*}/autoscalingPolicies" + body: "policy" + } + }; + option (google.api.method_signature) = "parent,policy"; + } + + // Updates (replaces) autoscaling policy. + // + // Disabled check for update_mask, because all updates will be full + // replacements. 
+ rpc UpdateAutoscalingPolicy(UpdateAutoscalingPolicyRequest) returns (AutoscalingPolicy) { + option (google.api.http) = { + put: "/v1/{policy.name=projects/*/locations/*/autoscalingPolicies/*}" + body: "policy" + additional_bindings { + put: "/v1/{policy.name=projects/*/regions/*/autoscalingPolicies/*}" + body: "policy" + } + }; + option (google.api.method_signature) = "policy"; + } + + // Retrieves autoscaling policy. + rpc GetAutoscalingPolicy(GetAutoscalingPolicyRequest) returns (AutoscalingPolicy) { + option (google.api.http) = { + get: "/v1/{name=projects/*/locations/*/autoscalingPolicies/*}" + additional_bindings { + get: "/v1/{name=projects/*/regions/*/autoscalingPolicies/*}" + } + }; + option (google.api.method_signature) = "name"; + } + + // Lists autoscaling policies in the project. + rpc ListAutoscalingPolicies(ListAutoscalingPoliciesRequest) returns (ListAutoscalingPoliciesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/locations/*}/autoscalingPolicies" + additional_bindings { + get: "/v1/{parent=projects/*/regions/*}/autoscalingPolicies" + } + }; + option (google.api.method_signature) = "parent"; + } + + // Deletes an autoscaling policy. It is an error to delete an autoscaling + // policy that is in use by one or more clusters. + rpc DeleteAutoscalingPolicy(DeleteAutoscalingPolicyRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/locations/*/autoscalingPolicies/*}" + additional_bindings { + delete: "/v1/{name=projects/*/regions/*/autoscalingPolicies/*}" + } + }; + option (google.api.method_signature) = "name"; + } +} + +// Describes an autoscaling policy for Dataproc cluster autoscaler. 
+message AutoscalingPolicy { + option (google.api.resource) = { + type: "dataproc.googleapis.com/AutoscalingPolicy" + pattern: "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}" + pattern: "projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}" + }; + + // Required. The policy id. + // + // The id must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). Cannot begin or end with underscore + // or hyphen. Must consist of between 3 and 50 characters. + // + string id = 1; + + // Output only. The "resource name" of the autoscaling policy, as described + // in https://cloud.google.com/apis/design/resource_names. + // + // * For `projects.regions.autoscalingPolicies`, the resource name of the + // policy has the following format: + // `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` + // + // * For `projects.locations.autoscalingPolicies`, the resource name of the + // policy has the following format: + // `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` + string name = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Autoscaling algorithm for policy. + oneof algorithm { + BasicAutoscalingAlgorithm basic_algorithm = 3 [(google.api.field_behavior) = REQUIRED]; + } + + // Required. Describes how the autoscaler will operate for primary workers. + InstanceGroupAutoscalingPolicyConfig worker_config = 4 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Describes how the autoscaler will operate for secondary workers. + InstanceGroupAutoscalingPolicyConfig secondary_worker_config = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The labels to associate with this autoscaling policy. + // Label **keys** must contain 1 to 63 characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). 
+ // Label **values** may be empty, but, if present, must contain 1 to 63 + // characters, and must conform to [RFC + // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + // associated with an autoscaling policy. + map labels = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// Basic algorithm for autoscaling. +message BasicAutoscalingAlgorithm { + oneof config { + // Required. YARN autoscaling configuration. + BasicYarnAutoscalingConfig yarn_config = 1 [(google.api.field_behavior) = REQUIRED]; + } + + // Optional. Duration between scaling events. A scaling period starts after + // the update operation from the previous event has completed. + // + // Bounds: [2m, 1d]. Default: 2m. + google.protobuf.Duration cooldown_period = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// Basic autoscaling configurations for YARN. +message BasicYarnAutoscalingConfig { + // Required. Timeout for YARN graceful decommissioning of Node Managers. + // Specifies the duration to wait for jobs to complete before forcefully + // removing workers (and potentially interrupting jobs). Only applicable to + // downscaling operations. + // + // Bounds: [0s, 1d]. + google.protobuf.Duration graceful_decommission_timeout = 5 [(google.api.field_behavior) = REQUIRED]; + + // Required. Fraction of average YARN pending memory in the last cooldown period + // for which to add workers. A scale-up factor of 1.0 will result in scaling + // up so that there is no pending memory remaining after the update (more + // aggressive scaling). A scale-up factor closer to 0 will result in a smaller + // magnitude of scaling up (less aggressive scaling). + // See [How autoscaling + // works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + // for more information. + // + // Bounds: [0.0, 1.0]. + double scale_up_factor = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. 
Fraction of average YARN pending memory in the last cooldown period + // for which to remove workers. A scale-down factor of 1 will result in + // scaling down so that there is no available memory remaining after the + // update (more aggressive scaling). A scale-down factor of 0 disables + // removing workers, which can be beneficial for autoscaling a single job. + // See [How autoscaling + // works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + // for more information. + // + // Bounds: [0.0, 1.0]. + double scale_down_factor = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Minimum scale-up threshold as a fraction of total cluster size + // before scaling occurs. For example, in a 20-worker cluster, a threshold of + // 0.1 means the autoscaler must recommend at least a 2-worker scale-up for + // the cluster to scale. A threshold of 0 means the autoscaler will scale up + // on any recommended change. + // + // Bounds: [0.0, 1.0]. Default: 0.0. + double scale_up_min_worker_fraction = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Minimum scale-down threshold as a fraction of total cluster size + // before scaling occurs. For example, in a 20-worker cluster, a threshold of + // 0.1 means the autoscaler must recommend at least a 2 worker scale-down for + // the cluster to scale. A threshold of 0 means the autoscaler will scale down + // on any recommended change. + // + // Bounds: [0.0, 1.0]. Default: 0.0. + double scale_down_min_worker_fraction = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Configuration for the size bounds of an instance group, including its +// proportional size to other groups. +message InstanceGroupAutoscalingPolicyConfig { + // Optional. Minimum number of instances for this group. + // + // Primary workers - Bounds: [2, max_instances]. Default: 2. + // Secondary workers - Bounds: [0, max_instances]. Default: 0. 
+ int32 min_instances = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Required. Maximum number of instances for this group. Required for primary + // workers. Note that by default, clusters will not use secondary workers. + // Required for secondary workers if the minimum secondary instances is set. + // + // Primary workers - Bounds: [min_instances, ). + // Secondary workers - Bounds: [min_instances, ). Default: 0. + int32 max_instances = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Weight for the instance group, which is used to determine the + // fraction of total workers in the cluster from this instance group. + // For example, if primary workers have weight 2, and secondary workers have + // weight 1, the cluster will have approximately 2 primary workers for each + // secondary worker. + // + // The cluster may not reach the specified balance if constrained + // by min/max bounds or other autoscaling settings. For example, if + // `max_instances` for secondary workers is 0, then only primary workers will + // be added. The cluster can also be out of balance when created. + // + // If weight is not set on any instance group, the cluster will default to + // equal weight for all groups: the cluster will attempt to maintain an equal + // number of workers in each group within the configured size bounds for each + // group. If weight is set for one group only, the cluster will default to + // zero weight on the unset group. For example if weight is set only on + // primary workers, the cluster will use primary workers only and no + // secondary workers. + int32 weight = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to create an autoscaling policy. +message CreateAutoscalingPolicyRequest { + // Required. The "resource name" of the region or location, as described + // in https://cloud.google.com/apis/design/resource_names. 
+ // + // * For `projects.regions.autoscalingPolicies.create`, the resource name + // of the region has the following format: + // `projects/{project_id}/regions/{region}` + // + // * For `projects.locations.autoscalingPolicies.create`, the resource name + // of the location has the following format: + // `projects/{project_id}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "dataproc.googleapis.com/AutoscalingPolicy" + } + ]; + + // Required. The autoscaling policy to create. + AutoscalingPolicy policy = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// A request to fetch an autoscaling policy. +message GetAutoscalingPolicyRequest { + // Required. The "resource name" of the autoscaling policy, as described + // in https://cloud.google.com/apis/design/resource_names. + // + // * For `projects.regions.autoscalingPolicies.get`, the resource name + // of the policy has the following format: + // `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` + // + // * For `projects.locations.autoscalingPolicies.get`, the resource name + // of the policy has the following format: + // `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "dataproc.googleapis.com/AutoscalingPolicy" + } + ]; +} + +// A request to update an autoscaling policy. +message UpdateAutoscalingPolicyRequest { + // Required. The updated autoscaling policy. + AutoscalingPolicy policy = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// A request to delete an autoscaling policy. +// +// Autoscaling policies in use by one or more clusters will not be deleted. +message DeleteAutoscalingPolicyRequest { + // Required. The "resource name" of the autoscaling policy, as described + // in https://cloud.google.com/apis/design/resource_names. 
+ // + // * For `projects.regions.autoscalingPolicies.delete`, the resource name + // of the policy has the following format: + // `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` + // + // * For `projects.locations.autoscalingPolicies.delete`, the resource name + // of the policy has the following format: + // `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "dataproc.googleapis.com/AutoscalingPolicy" + } + ]; +} + +// A request to list autoscaling policies in a project. +message ListAutoscalingPoliciesRequest { + // Required. The "resource name" of the region or location, as described + // in https://cloud.google.com/apis/design/resource_names. + // + // * For `projects.regions.autoscalingPolicies.list`, the resource name + // of the region has the following format: + // `projects/{project_id}/regions/{region}` + // + // * For `projects.locations.autoscalingPolicies.list`, the resource name + // of the location has the following format: + // `projects/{project_id}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "dataproc.googleapis.com/AutoscalingPolicy" + } + ]; + + // Optional. The maximum number of results to return in each response. + // Must be less than or equal to 1000. Defaults to 100. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The page token, returned by a previous call, to request the + // next page of results. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// A response to a request to list autoscaling policies in a project. +message ListAutoscalingPoliciesResponse { + // Output only. Autoscaling policies list. + repeated AutoscalingPolicy policies = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
This token is included in the response if there are more + // results to fetch. + string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/batches.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/batches.proto new file mode 100644 index 00000000000..eafb4e354ea --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/batches.proto @@ -0,0 +1,372 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.dataproc.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/dataproc/v1/shared.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; +option java_multiple_files = true; +option java_outer_classname = "BatchesProto"; +option java_package = "com.google.cloud.dataproc.v1"; + +// The BatchController provides methods to manage batch workloads. 
+service BatchController { + option (google.api.default_host) = "dataproc.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a batch workload that executes asynchronously. + rpc CreateBatch(CreateBatchRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/locations/*}/batches" + body: "batch" + }; + option (google.api.method_signature) = "parent,batch,batch_id"; + option (google.longrunning.operation_info) = { + response_type: "Batch" + metadata_type: "google.cloud.dataproc.v1.BatchOperationMetadata" + }; + } + + // Gets the batch workload resource representation. + rpc GetBatch(GetBatchRequest) returns (Batch) { + option (google.api.http) = { + get: "/v1/{name=projects/*/locations/*/batches/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists batch workloads. + rpc ListBatches(ListBatchesRequest) returns (ListBatchesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/locations/*}/batches" + }; + option (google.api.method_signature) = "parent"; + } + + // Deletes the batch workload resource. If the batch is not in terminal state, + // the delete fails and the response returns `FAILED_PRECONDITION`. + rpc DeleteBatch(DeleteBatchRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/locations/*/batches/*}" + }; + option (google.api.method_signature) = "name"; + } +} + +// A request to create a batch workload. +message CreateBatchRequest { + // Required. The parent resource where this batch will be created. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "dataproc.googleapis.com/Batch" + } + ]; + + // Required. The batch to create. + Batch batch = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. 
The ID to use for the batch, which will become the final component of + // the batch's resource name. + // + // This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`. + string batch_id = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique ID used to identify the request. If the service + // receives two + // [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s + // with the same request_id, the second request is ignored and the + // Operation that corresponds to the first Batch created and stored + // in the backend is returned. + // + // Recommendation: Set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The value must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to get the resource representation for a batch workload. +message GetBatchRequest { + // Required. The name of the batch to retrieve. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "dataproc.googleapis.com/Batch" + } + ]; +} + +// A request to list batch workloads in a project. +message ListBatchesRequest { + // Required. The parent, which owns this collection of batches. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "dataproc.googleapis.com/Batch" + } + ]; + + // Optional. The maximum number of batches to return in each response. + // The service may return fewer than this value. + // The default page size is 20; the maximum page size is 1000. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A page token received from a previous `ListBatches` call. + // Provide this token to retrieve the subsequent page. 
+ string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// A list of batch workloads. +message ListBatchesResponse { + // The batches from the specified collection. + repeated Batch batches = 1; + + // A token, which can be sent as `page_token` to retrieve the next page. + // If this field is omitted, there are no subsequent pages. + string next_page_token = 2; +} + +// A request to delete a batch workload. +message DeleteBatchRequest { + // Required. The name of the batch resource to delete. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "dataproc.googleapis.com/Batch" + } + ]; +} + +// A representation of a batch workload in the service. +message Batch { + option (google.api.resource) = { + type: "dataproc.googleapis.com/Batch" + pattern: "projects/{project}/locations/{location}/batches/{batch}" + }; + + // Historical state information. + message StateHistory { + // Output only. The state of the batch at this point in history. + State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Details about the state at this point in history. + string state_message = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when the batch entered the historical state. + google.protobuf.Timestamp state_start_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // The batch state. + enum State { + // The batch state is unknown. + STATE_UNSPECIFIED = 0; + + // The batch is created before running. + PENDING = 1; + + // The batch is running. + RUNNING = 2; + + // The batch is cancelling. + CANCELLING = 3; + + // The batch cancellation was successful. + CANCELLED = 4; + + // The batch completed successfully. + SUCCEEDED = 5; + + // The batch is no longer running due to an error. + FAILED = 6; + } + + // Output only. The resource name of the batch. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
A batch UUID (Unique Universal Identifier). The service + // generates this value when it creates the batch. + string uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when the batch was created. + google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The application/framework-specific portion of the batch configuration. + oneof batch_config { + // Optional. PySpark batch config. + PySparkBatch pyspark_batch = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Spark batch config. + SparkBatch spark_batch = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. SparkR batch config. + SparkRBatch spark_r_batch = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. SparkSql batch config. + SparkSqlBatch spark_sql_batch = 7 [(google.api.field_behavior) = OPTIONAL]; + } + + // Output only. Runtime information about batch execution. + RuntimeInfo runtime_info = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The state of the batch. + State state = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Batch state details, such as a failure + // description if the state is `FAILED`. + string state_message = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when the batch entered a current state. + google.protobuf.Timestamp state_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The email address of the user who created the batch. + string creator = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The labels to associate with this batch. + // Label **keys** must contain 1 to 63 characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // Label **values** may be empty, but, if present, must contain 1 to 63 + // characters, and must conform to [RFC + // 1035](https://www.ietf.org/rfc/rfc1035.txt). 
No more than 32 labels can be + // associated with a batch. + map labels = 13 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Runtime configuration for the batch execution. + RuntimeConfig runtime_config = 14 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Environment configuration for the batch execution. + EnvironmentConfig environment_config = 15 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The resource name of the operation associated with this batch. + string operation = 16 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Historical state information for the batch. + repeated StateHistory state_history = 17 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// A configuration for running an +// [Apache +// PySpark](https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) +// batch workload. +message PySparkBatch { + // Required. The HCFS URI of the main Python file to use as the Spark driver. Must + // be a .py file. + string main_python_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The arguments to pass to the driver. Do not include arguments + // that can be set as batch properties, such as `--conf`, since a collision + // can occur that causes an incorrect batch submission. + repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS file URIs of Python files to pass to the PySpark + // framework. Supported file types: `.py`, `.egg`, and `.zip`. + repeated string python_file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of jar files to add to the classpath of the + // Spark driver and tasks. + repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of files to be placed in the working directory of + // each executor. + repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
HCFS URIs of archives to be extracted into the working directory + // of each executor. Supported file types: + // `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`. + repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// A configuration for running an [Apache Spark](http://spark.apache.org/) +// batch workload. +message SparkBatch { + // The specification of the main method to call to drive the Spark + // workload. Specify either the jar file that contains the main class or the + // main class name. To pass both a main jar and a main class in that jar, add + // the jar to `jar_file_uris`, and then specify the main class + // name in `main_class`. + oneof driver { + // Optional. The HCFS URI of the jar file that contains the main class. + string main_jar_file_uri = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The name of the driver main class. The jar file that contains the class + // must be in the classpath or specified in `jar_file_uris`. + string main_class = 2 [(google.api.field_behavior) = OPTIONAL]; + } + + // Optional. The arguments to pass to the driver. Do not include arguments + // that can be set as batch properties, such as `--conf`, since a collision + // can occur that causes an incorrect batch submission. + repeated string args = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of jar files to add to the classpath of the + // Spark driver and tasks. + repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of files to be placed in the working directory of + // each executor. + repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of archives to be extracted into the working directory + // of each executor. Supported file types: + // `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`. 
+ repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// A configuration for running an +// [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html) +// batch workload. +message SparkRBatch { + // Required. The HCFS URI of the main R file to use as the driver. + // Must be a `.R` or `.r` file. + string main_r_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The arguments to pass to the Spark driver. Do not include arguments + // that can be set as batch properties, such as `--conf`, since a collision + // can occur that causes an incorrect batch submission. + repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of files to be placed in the working directory of + // each executor. + repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of archives to be extracted into the working directory + // of each executor. Supported file types: + // `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`. + repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// A configuration for running +// [Apache Spark SQL](http://spark.apache.org/sql/) queries as a batch workload. +message SparkSqlBatch { + // Required. The HCFS URI of the script that contains Spark SQL queries to execute. + string query_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Mapping of query variable names to values (equivalent to the + // Spark SQL command: `SET name="value";`). + map query_variables = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. 
+ repeated string jar_file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/clusters.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/clusters.proto new file mode 100644 index 00000000000..11611fbf680 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/clusters.proto @@ -0,0 +1,1473 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.dataproc.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/dataproc/v1/shared.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; +option java_multiple_files = true; +option java_outer_classname = "ClustersProto"; +option java_package = "com.google.cloud.dataproc.v1"; +option (google.api.resource_definition) = { + type: "container.googleapis.com/Cluster" + pattern: "projects/{project}/locations/{location}/clusters/{cluster}" +}; +option (google.api.resource_definition) = { + type: "metastore.googleapis.com/Service" + pattern: "projects/{project}/locations/{location}/services/{service}" +}; + +// The ClusterControllerService provides methods to manage clusters +// of Compute Engine instances. +service ClusterController { + option (google.api.default_host) = "dataproc.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a cluster in a project. The returned + // [Operation.metadata][google.longrunning.Operation.metadata] will be + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). 
+ rpc CreateCluster(CreateClusterRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/projects/{project_id}/regions/{region}/clusters" + body: "cluster" + }; + option (google.api.method_signature) = "project_id,region,cluster"; + option (google.longrunning.operation_info) = { + response_type: "Cluster" + metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" + }; + } + + // Updates a cluster in a project. The returned + // [Operation.metadata][google.longrunning.Operation.metadata] will be + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + // The cluster must be in a + // [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error + // is returned. + rpc UpdateCluster(UpdateClusterRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" + body: "cluster" + }; + option (google.api.method_signature) = + "project_id,region,cluster_name,cluster,update_mask"; + option (google.longrunning.operation_info) = { + response_type: "Cluster" + metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" + }; + } + + // Stops a cluster in a project. + rpc StopCluster(StopClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:stop" + body: "*" + }; + option (google.longrunning.operation_info) = { + response_type: "Cluster" + metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" + }; + } + + // Starts a cluster in a project. 
+ rpc StartCluster(StartClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:start" + body: "*" + }; + option (google.longrunning.operation_info) = { + response_type: "Cluster" + metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" + }; + } + + // Deletes a cluster in a project. The returned + // [Operation.metadata][google.longrunning.Operation.metadata] will be + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + rpc DeleteCluster(DeleteClusterRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + delete: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" + }; + option (google.api.method_signature) = "project_id,region,cluster_name"; + option (google.longrunning.operation_info) = { + response_type: "google.protobuf.Empty" + metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" + }; + } + + // Gets the resource representation for a cluster in a project. + rpc GetCluster(GetClusterRequest) returns (Cluster) { + option (google.api.http) = { + get: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" + }; + option (google.api.method_signature) = "project_id,region,cluster_name"; + } + + // Lists all regions/{region}/clusters in a project alphabetically. + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (google.api.http) = { + get: "/v1/projects/{project_id}/regions/{region}/clusters" + }; + option (google.api.method_signature) = "project_id,region"; + option (google.api.method_signature) = "project_id,region,filter"; + } + + // Gets cluster diagnostic information. 
The returned + // [Operation.metadata][google.longrunning.Operation.metadata] will be + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + // After the operation completes, + // [Operation.response][google.longrunning.Operation.response] + // contains + // [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). + rpc DiagnoseCluster(DiagnoseClusterRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose" + body: "*" + }; + option (google.api.method_signature) = "project_id,region,cluster_name"; + option (google.longrunning.operation_info) = { + response_type: "DiagnoseClusterResults" + metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" + }; + } +} + +// Describes the identifying information, config, and status of +// a Dataproc cluster +message Cluster { + // Required. The Google Cloud Platform project ID that the cluster belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster name, which must be unique within a project. + // The name must start with a lowercase letter, and can contain + // up to 51 lowercase letters, numbers, and hyphens. It cannot end + // with a hyphen. The name of a deleted cluster can be reused. + string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The cluster config for a cluster of Compute Engine Instances. + // Note that Dataproc may set default values, and values may change + // when clusters are updated. + ClusterConfig config = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
The virtual cluster config is used when creating a Dataproc + // cluster that does not directly control the underlying compute resources, + // for example, when creating a [Dataproc-on-GKE + // cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke). + // Dataproc may set default values, and values may change when + // clusters are updated. Exactly one of + // [config][google.cloud.dataproc.v1.Cluster.config] or + // [virtual_cluster_config][google.cloud.dataproc.v1.Cluster.virtual_cluster_config] + // must be specified. + VirtualClusterConfig virtual_cluster_config = 10 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The labels to associate with this cluster. + // Label **keys** must contain 1 to 63 characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // Label **values** may be empty, but, if present, must contain 1 to 63 + // characters, and must conform to [RFC + // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + // associated with a cluster. + map labels = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Cluster status. + ClusterStatus status = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The previous cluster status. + repeated ClusterStatus status_history = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A cluster UUID (Unique Universal Identifier). Dataproc + // generates this value when it creates the cluster. + string cluster_uuid = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Contains cluster daemon metrics such as HDFS and YARN stats. + // + // **Beta Feature**: This report is available for testing purposes only. It + // may be changed before final release. + ClusterMetrics metrics = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The cluster config. +message ClusterConfig { + // Optional. 
A Cloud Storage bucket used to stage job + // dependencies, config files, and job driver console output. + // If you do not specify a staging bucket, Cloud + // Dataproc will determine a Cloud Storage location (US, + // ASIA, or EU) for your cluster's staging bucket according to the + // Compute Engine zone where your cluster is deployed, and then create + // and manage this project-level, per-location bucket (see + // [Dataproc staging and temp + // buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // **This field requires a Cloud Storage bucket name, not a `gs://...` URI to + // a Cloud Storage bucket.** + string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs + // data, such as Spark and MapReduce history files. If you do not specify a + // temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or + // EU) for your cluster's temp bucket according to the Compute Engine zone + // where your cluster is deployed, and then create and manage this + // project-level, per-location bucket. The default bucket has a TTL of 90 + // days, but you can use any TTL (or none) if you specify a bucket (see + // [Dataproc staging and temp + // buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // **This field requires a Cloud Storage bucket name, not a `gs://...` URI to + // a Cloud Storage bucket.** + string temp_bucket = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The shared Compute Engine config settings for + // all instances in a cluster. + GceClusterConfig gce_cluster_config = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Compute Engine config settings for + // the cluster's master instance. + InstanceGroupConfig master_config = 9 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
The Compute Engine config settings for + // the cluster's worker instances. + InstanceGroupConfig worker_config = 10 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Compute Engine config settings for + // a cluster's secondary worker instances + InstanceGroupConfig secondary_worker_config = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The config settings for cluster software. + SoftwareConfig software_config = 13 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Commands to execute on each node after config is + // completed. By default, executables are run on master and all worker nodes. + // You can test a node's `role` metadata to run an executable on + // a master or worker node, as shown below using `curl` (you can also use + // `wget`): + // + // ROLE=$(curl -H Metadata-Flavor:Google + // http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + // if [[ "${ROLE}" == 'Master' ]]; then + // ... master specific actions ... + // else + // ... worker specific actions ... + // fi + repeated NodeInitializationAction initialization_actions = 11 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Encryption settings for the cluster. + EncryptionConfig encryption_config = 15 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Autoscaling config for the policy associated with the cluster. + // Cluster does not autoscale if this field is unset. + AutoscalingConfig autoscaling_config = 18 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Security settings for the cluster. + SecurityConfig security_config = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Lifecycle setting for the cluster. + LifecycleConfig lifecycle_config = 17 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Port/endpoint configuration for this cluster + EndpointConfig endpoint_config = 19 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Metastore configuration. 
+ MetastoreConfig metastore_config = 20 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The config for Dataproc metrics. + DataprocMetricConfig dataproc_metric_config = 23 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The node group settings. + repeated AuxiliaryNodeGroup auxiliary_node_groups = 25 + [(google.api.field_behavior) = OPTIONAL]; +} + +// The Dataproc cluster config for a cluster that does not directly control the +// underlying compute resources, such as a [Dataproc-on-GKE +// cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke). +message VirtualClusterConfig { + // Optional. A Cloud Storage bucket used to stage job + // dependencies, config files, and job driver console output. + // If you do not specify a staging bucket, Cloud + // Dataproc will determine a Cloud Storage location (US, + // ASIA, or EU) for your cluster's staging bucket according to the + // Compute Engine zone where your cluster is deployed, and then create + // and manage this project-level, per-location bucket (see + // [Dataproc staging and temp + // buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // **This field requires a Cloud Storage bucket name, not a `gs://...` URI to + // a Cloud Storage bucket.** + string staging_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; + + oneof infrastructure_config { + // Required. The configuration for running the Dataproc cluster on + // Kubernetes. + KubernetesClusterConfig kubernetes_cluster_config = 6 + [(google.api.field_behavior) = REQUIRED]; + } + + // Optional. Configuration of auxiliary services used by this cluster. + AuxiliaryServicesConfig auxiliary_services_config = 7 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Auxiliary services configuration for a Cluster. +message AuxiliaryServicesConfig { + // Optional. The Hive Metastore configuration for this workload. 
+ MetastoreConfig metastore_config = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Spark History Server configuration for the workload. + SparkHistoryServerConfig spark_history_server_config = 2 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Endpoint config for this cluster +message EndpointConfig { + // Output only. The map of port descriptions to URLs. Will only be populated + // if enable_http_port_access is true. + map http_ports = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. If true, enable http access to specific ports on the cluster + // from external sources. Defaults to false. + bool enable_http_port_access = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// Autoscaling Policy config associated with the cluster. +message AutoscalingConfig { + // Optional. The autoscaling policy used by the cluster. + // + // Only resource names including projectid and location (region) are valid. + // Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` + // * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` + // + // Note that the policy must be in the same project and Dataproc region. + string policy_uri = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// Encryption settings for the cluster. +message EncryptionConfig { + // Optional. The Cloud KMS key name to use for PD disk encryption for all + // instances in the cluster. + string gce_pd_kms_key_name = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// Common config settings for resources of Compute Engine cluster +// instances, applicable to all instances in the cluster. +message GceClusterConfig { + // `PrivateIpv6GoogleAccess` controls whether and how Dataproc cluster nodes + // can communicate with Google Services through gRPC over IPv6. 
+ // These values are directly mapped to corresponding values in the + // [Compute Engine Instance + // fields](https://cloud.google.com/compute/docs/reference/rest/v1/instances). + enum PrivateIpv6GoogleAccess { + // If unspecified, Compute Engine default behavior will apply, which + // is the same as + // [INHERIT_FROM_SUBNETWORK][google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.INHERIT_FROM_SUBNETWORK]. + PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED = 0; + + // Private access to and from Google Services configuration + // inherited from the subnetwork configuration. This is the + // default Compute Engine behavior. + INHERIT_FROM_SUBNETWORK = 1; + + // Enables outbound private IPv6 access to Google Services from the Dataproc + // cluster. + OUTBOUND = 2; + + // Enables bidirectional private IPv6 access between Google Services and the + // Dataproc cluster. + BIDIRECTIONAL = 3; + } + + // Optional. The zone where the Compute Engine cluster will be located. + // On a create request, it is required in the "global" region. If omitted + // in a non-global Dataproc region, the service will pick a zone in the + // corresponding Compute Engine region. On a get request, zone will + // always be present. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` + // * `projects/[project_id]/zones/[zone]` + // * `us-central1-f` + string zone_uri = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Compute Engine network to be used for machine + // communications. Cannot be specified with subnetwork_uri. If neither + // `network_uri` nor `subnetwork_uri` is specified, the "default" network of + // the project is used, if it exists. Cannot be a "Custom Subnet Network" (see + // [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for + // more information). + // + // A full URL, partial URI, or short name are valid. 
Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` + // * `projects/[project_id]/regions/global/default` + // * `default` + string network_uri = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Compute Engine subnetwork to be used for machine + // communications. Cannot be specified with network_uri. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` + // * `projects/[project_id]/regions/us-east1/subnetworks/sub0` + // * `sub0` + string subnetwork_uri = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true, all instances in the cluster will only have internal IP + // addresses. By default, clusters are not restricted to internal IP + // addresses, and will have ephemeral external IP addresses assigned to each + // instance. This `internal_ip_only` restriction can only be enabled for + // subnetwork enabled networks, and all off-cluster dependencies must be + // configured to be accessible without external IP addresses. + bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The type of IPv6 access for a cluster. + PrivateIpv6GoogleAccess private_ipv6_google_access = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The [Dataproc service + // account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) + // (also see [VM Data Plane + // identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) + // used by Dataproc cluster VM instances to access Google Cloud Platform + // services. + // + // If not specified, the + // [Compute Engine default service + // account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) + // is used. 
+ string service_account = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The URIs of service account scopes to be included in + // Compute Engine instances. The following base set of scopes is always + // included: + // + // * https://www.googleapis.com/auth/cloud.useraccounts.readonly + // * https://www.googleapis.com/auth/devstorage.read_write + // * https://www.googleapis.com/auth/logging.write + // + // If no scopes are specified, the following defaults are also provided: + // + // * https://www.googleapis.com/auth/bigquery + // * https://www.googleapis.com/auth/bigtable.admin.table + // * https://www.googleapis.com/auth/bigtable.data + // * https://www.googleapis.com/auth/devstorage.full_control + repeated string service_account_scopes = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // The Compute Engine tags to add to all instances (see [Tagging + // instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + repeated string tags = 4; + + // The Compute Engine metadata entries to add to all instances (see + // [Project and instance + // metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + map metadata = 5; + + // Optional. Reservation Affinity for consuming Zonal reservation. + ReservationAffinity reservation_affinity = 11 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Node Group Affinity for sole-tenant clusters. + NodeGroupAffinity node_group_affinity = 13 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Shielded Instance Config for clusters using [Compute Engine + // Shielded + // VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm). + ShieldedInstanceConfig shielded_instance_config = 14 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Confidential Instance Config for clusters using [Confidential + // VMs](https://cloud.google.com/compute/confidential-vm/docs). 
+ ConfidentialInstanceConfig confidential_instance_config = 15 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Node Group Affinity for clusters using sole-tenant node groups. +// **The Dataproc `NodeGroupAffinity` resource is not related to the +// Dataproc [NodeGroup][google.cloud.dataproc.v1.NodeGroup] resource.** +message NodeGroupAffinity { + // Required. The URI of a + // sole-tenant [node group + // resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) + // that the cluster will be created on. + // + // A full URL, partial URI, or node group name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` + // * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` + // * `node-group-1` + string node_group_uri = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Shielded Instance Config for clusters using [Compute Engine Shielded +// VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm). +message ShieldedInstanceConfig { + // Optional. Defines whether instances have Secure Boot enabled. + bool enable_secure_boot = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Defines whether instances have the vTPM enabled. + bool enable_vtpm = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Defines whether instances have integrity monitoring enabled. + bool enable_integrity_monitoring = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Confidential Instance Config for clusters using [Confidential +// VMs](https://cloud.google.com/compute/confidential-vm/docs) +message ConfidentialInstanceConfig { + // Optional. Defines whether the instance should have confidential compute + // enabled. + bool enable_confidential_compute = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// The config settings for Compute Engine resources in +// an instance group, such as a master or worker group. 
+message InstanceGroupConfig { + // Controls the use of preemptible instances within the group. + enum Preemptibility { + // Preemptibility is unspecified, the system will choose the + // appropriate setting for each instance group. + PREEMPTIBILITY_UNSPECIFIED = 0; + + // Instances are non-preemptible. + // + // This option is allowed for all instance groups and is the only valid + // value for Master and Worker instance groups. + NON_PREEMPTIBLE = 1; + + // Instances are [preemptible] + // (https://cloud.google.com/compute/docs/instances/preemptible). + // + // This option is allowed only for [secondary worker] + // (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) + // groups. + PREEMPTIBLE = 2; + } + + // Optional. The number of VM instances in the instance group. + // For [HA + // cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) + // [master_config](#FIELDS.master_config) groups, **must be set to 3**. + // For standard cluster [master_config](#FIELDS.master_config) groups, + // **must be set to 1**. + int32 num_instances = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The list of instance names. Dataproc derives the names + // from `cluster_name`, `num_instances`, and the instance group. + repeated string instance_names = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The Compute Engine image resource used for cluster instances. + // + // The URI can represent an image or image family. + // + // Image examples: + // + // * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` + // * `projects/[project_id]/global/images/[image-id]` + // * `image-id` + // + // Image family examples. 
Dataproc will use the most recent + // image from the family: + // + // * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` + // * `projects/[project_id]/global/images/family/[custom-image-family-name]` + // + // If the URI is unspecified, it will be inferred from + // `SoftwareConfig.image_version` or the system default. + string image_uri = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Compute Engine machine type used for cluster instances. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` + // * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` + // * `n1-standard-2` + // + // **Auto Zone Exception**: If you are using the Dataproc + // [Auto Zone + // Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + // feature, you must use the short name of the machine type + // resource, for example, `n1-standard-2`. + string machine_type_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Disk option config settings. + DiskConfig disk_config = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Specifies that this instance group contains preemptible + // instances. + bool is_preemptible = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Specifies the preemptibility of the instance group. + // + // The default value for master and worker groups is + // `NON_PREEMPTIBLE`. This default cannot be changed. + // + // The default value for secondary instances is + // `PREEMPTIBLE`. + Preemptibility preemptibility = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The config for Compute Engine Instance Group + // Manager that manages this group. + // This is only used for preemptible instance groups. 
+ ManagedGroupConfig managed_group_config = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The Compute Engine accelerator configuration for these + // instances. + repeated AcceleratorConfig accelerators = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the minimum cpu platform for the Instance Group. + // See [Dataproc -> Minimum CPU + // Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + string min_cpu_platform = 9 [(google.api.field_behavior) = OPTIONAL]; +} + +// Specifies the resources used to actively manage an instance group. +message ManagedGroupConfig { + // Output only. The name of the Instance Template used for the Managed + // Instance Group. + string instance_template_name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The name of the Instance Group Manager for this group. + string instance_group_manager_name = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Specifies the type and number of accelerator cards attached to the instances +// of an instance. See [GPUs on Compute +// Engine](https://cloud.google.com/compute/docs/gpus/). +message AcceleratorConfig { + // Full URL, partial URI, or short name of the accelerator type resource to + // expose to this instance. See + // [Compute Engine + // AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). 
+ // + // Examples: + // + // * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` + // * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` + // * `nvidia-tesla-k80` + // + // **Auto Zone Exception**: If you are using the Dataproc + // [Auto Zone + // Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + // feature, you must use the short name of the accelerator type + // resource, for example, `nvidia-tesla-k80`. + string accelerator_type_uri = 1; + + // The number of the accelerator cards of this type exposed to this instance. + int32 accelerator_count = 2; +} + +// Specifies the config of disk options for a group of VM instances. +message DiskConfig { + // Optional. Type of the boot disk (default is "pd-standard"). + // Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), + // "pd-ssd" (Persistent Disk Solid State Drive), + // or "pd-standard" (Persistent Disk Hard Disk Drive). + // See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types). + string boot_disk_type = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Size in GB of the boot disk (default is 500GB). + int32 boot_disk_size_gb = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Number of attached SSDs, from 0 to 8 (default is 0). + // If SSDs are not attached, the boot disk is used to store runtime logs and + // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. + // If one or more SSDs are attached, this runtime bulk + // data is spread across them, and the boot disk contains only basic + // config and installed binaries. + int32 num_local_ssds = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Interface type of local SSDs (default is "scsi"). + // Valid values: "scsi" (Small Computer System Interface), + // "nvme" (Non-Volatile Memory Express). 
+ // See [local SSD + // performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance). + string local_ssd_interface = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Node group identification and configuration information. +message AuxiliaryNodeGroup { + // Required. Node group configuration. + NodeGroup node_group = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A node group ID. Generated if not specified. + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). Cannot begin or end with underscore + // or hyphen. Must consist of from 3 to 33 characters. + string node_group_id = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// Dataproc Node Group. +// **The Dataproc `NodeGroup` resource is not related to the +// Dataproc [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] +// resource.** +message NodeGroup { + option (google.api.resource) = { + type: "dataproc.googleapis.com/NodeGroup" + pattern: "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}" + }; + + // Node group roles. + enum Role { + // Required unspecified role. + ROLE_UNSPECIFIED = 0; + + // Job drivers run on the node group. + DRIVER = 1; + } + + // The Node group [resource name](https://aip.dev/122). + string name = 1; + + // Required. Node group roles. + repeated Role roles = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The node group instance group configuration. + InstanceGroupConfig node_group_config = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Node group labels. + // + // * Label **keys** must consist of from 1 to 63 characters and conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // * Label **values** can be empty. If specified, they must consist of from + // 1 to 63 characters and conform to [RFC 1035] + // (https://www.ietf.org/rfc/rfc1035.txt). + // * The node group must have no more than 32 labels. 
+ map labels = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Specifies an executable to run on a fully configured node and a +// timeout period for executable completion. +message NodeInitializationAction { + // Required. Cloud Storage URI of executable file. + string executable_file = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Amount of time executable has to complete. Default is + // 10 minutes (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + // + // Cluster creation fails with an explanatory error message (the + // name of the executable that caused the error and the exceeded timeout + // period) if the executable is not completed at end of the timeout period. + google.protobuf.Duration execution_timeout = 2 + [(google.api.field_behavior) = OPTIONAL]; +} + +// The status of a cluster and its instances. +message ClusterStatus { + // The cluster state. + enum State { + // The cluster state is unknown. + UNKNOWN = 0; + + // The cluster is being created and set up. It is not ready for use. + CREATING = 1; + + // The cluster is currently running and healthy. It is ready for use. + // + // **Note:** The cluster state changes from "creating" to "running" status + // after the master node(s), first two primary worker nodes (and the last + // primary worker node if primary workers > 2) are running. + RUNNING = 2; + + // The cluster encountered an error. It is not ready for use. + ERROR = 3; + + // The cluster has encountered an error while being updated. Jobs can + // be submitted to the cluster, but the cluster cannot be updated. + ERROR_DUE_TO_UPDATE = 9; + + // The cluster is being deleted. It cannot be used. + DELETING = 4; + + // The cluster is being updated. It continues to accept and process jobs. + UPDATING = 5; + + // The cluster is being stopped. It cannot be used. + STOPPING = 6; + + // The cluster is currently stopped. It is not ready for use. 
+ STOPPED = 7; + + // The cluster is being started. It is not ready for use. + STARTING = 8; + } + + // The cluster substate. + enum Substate { + // The cluster substate is unknown. + UNSPECIFIED = 0; + + // The cluster is known to be in an unhealthy state + // (for example, critical daemons are not running or HDFS capacity is + // exhausted). + // + // Applies to RUNNING state. + UNHEALTHY = 1; + + // The agent-reported status is out of date (may occur if + // Dataproc loses communication with Agent). + // + // Applies to RUNNING state. + STALE_STATUS = 2; + } + + // Output only. The cluster's state. + State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Output only. Details of cluster's state. + string detail = 2 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.field_behavior) = OPTIONAL + ]; + + // Output only. Time when this state was entered (see JSON representation of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Timestamp state_start_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Additional state information that includes + // status reported by the agent. + Substate substate = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Security related configuration, including encryption, Kerberos, etc. +message SecurityConfig { + // Optional. Kerberos related configuration. + KerberosConfig kerberos_config = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Identity related configuration, including service account based + // secure multi-tenancy user mappings. + IdentityConfig identity_config = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// Specifies Kerberos related configuration. +message KerberosConfig { + // Optional. Flag to indicate whether to Kerberize the cluster (default: + // false). Set this field to true to enable Kerberos on a cluster. 
+ bool enable_kerberos = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the root + // principal password. + string root_principal_password_uri = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The uri of the KMS key used to encrypt various sensitive + // files. + string kms_key_uri = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Cloud Storage URI of the keystore file used for SSL + // encryption. If not provided, Dataproc will provide a self-signed + // certificate. + string keystore_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Cloud Storage URI of the truststore file used for SSL + // encryption. If not provided, Dataproc will provide a self-signed + // certificate. + string truststore_uri = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the + // password to the user provided keystore. For the self-signed certificate, + // this password is generated by Dataproc. + string keystore_password_uri = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the + // password to the user provided key. For the self-signed certificate, this + // password is generated by Dataproc. + string key_password_uri = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the + // password to the user provided truststore. For the self-signed certificate, + // this password is generated by Dataproc. + string truststore_password_uri = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The remote realm the Dataproc on-cluster KDC will trust, should + // the user enable cross realm trust. + string cross_realm_trust_realm = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
The KDC (IP or hostname) for the remote trusted realm in a cross + // realm trust relationship. + string cross_realm_trust_kdc = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The admin server (IP or hostname) for the remote trusted realm in + // a cross realm trust relationship. + string cross_realm_trust_admin_server = 11 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the + // shared password between the on-cluster Kerberos realm and the remote + // trusted realm, in a cross realm trust relationship. + string cross_realm_trust_shared_password_uri = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the + // master key of the KDC database. + string kdc_db_key_uri = 13 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The lifetime of the ticket granting ticket, in hours. + // If not specified, or user specifies 0, then default value 10 + // will be used. + int32 tgt_lifetime_hours = 14 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The name of the on-cluster Kerberos realm. + // If not specified, the uppercased domain of hostnames will be the realm. + string realm = 15 [(google.api.field_behavior) = OPTIONAL]; +} + +// Identity related configuration, including service account based +// secure multi-tenancy user mappings. +message IdentityConfig { + // Required. Map of user to service account. + map user_service_account_mapping = 1 + [(google.api.field_behavior) = REQUIRED]; +} + +// Specifies the selection and config of software inside the cluster. +message SoftwareConfig { + // Optional. The version of software inside the cluster. 
It must be one of the + // supported [Dataproc + // Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), + // such as "1.2" (including a subminor version, such as "1.2.29"), or the + // ["preview" + // version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). + // If unspecified, it defaults to the latest Debian version. + string image_version = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The properties to set on daemon config files. + // + // Property keys are specified in `prefix:property` format, for example + // `core:hadoop.tmp.dir`. The following are supported prefixes + // and their mappings: + // + // * capacity-scheduler: `capacity-scheduler.xml` + // * core: `core-site.xml` + // * distcp: `distcp-default.xml` + // * hdfs: `hdfs-site.xml` + // * hive: `hive-site.xml` + // * mapred: `mapred-site.xml` + // * pig: `pig.properties` + // * spark: `spark-defaults.conf` + // * yarn: `yarn-site.xml` + // + // For more information, see [Cluster + // properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties). + map properties = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The set of components to activate on the cluster. + repeated Component optional_components = 3 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Specifies the cluster auto-delete schedule configuration. +message LifecycleConfig { + // Optional. The duration to keep the cluster alive while idling (when no jobs + // are running). Passing this threshold will cause the cluster to be + // deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON + // representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). 
+ google.protobuf.Duration idle_delete_ttl = 1 + [(google.api.field_behavior) = OPTIONAL]; + + // Either the exact time the cluster should be deleted at or + // the cluster maximum age. + oneof ttl { + // Optional. The time when cluster will be auto-deleted (see JSON + // representation of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Timestamp auto_delete_time = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The lifetime duration of cluster. The cluster will be + // auto-deleted at the end of this period. Minimum value is 10 minutes; + // maximum value is 14 days (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Duration auto_delete_ttl = 3 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Output only. The time when cluster became idle (most recent job finished) + // and became eligible for deletion due to idleness (see JSON representation + // of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Timestamp idle_start_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Specifies a Metastore configuration. +message MetastoreConfig { + // Required. Resource name of an existing Dataproc Metastore service. + // + // Example: + // + // * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]` + string dataproc_metastore_service = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "metastore.googleapis.com/Service" + } + ]; +} + +// Dataproc metric config. +message DataprocMetricConfig { + // A source for the collection of Dataproc OSS metrics (see [available OSS + // metrics] + // (https://cloud.google.com//dataproc/docs/guides/monitoring#available_oss_metrics)). + enum MetricSource { + // Required unspecified metric source. 
+ METRIC_SOURCE_UNSPECIFIED = 0; + + // Default monitoring agent metrics. If this source is enabled, + // Dataproc enables the monitoring agent in Compute Engine, + // and collects default monitoring agent metrics, which are published + // with an `agent.googleapis.com` prefix. + MONITORING_AGENT_DEFAULTS = 1; + + // HDFS metric source. + HDFS = 2; + + // Spark metric source. + SPARK = 3; + + // YARN metric source. + YARN = 4; + + // Spark History Server metric source. + SPARK_HISTORY_SERVER = 5; + + // Hiveserver2 metric source. + HIVESERVER2 = 6; + } + + // A Dataproc OSS metric. + message Metric { + // Required. Default metrics are collected unless `metricOverrides` are + // specified for the metric source (see [Available OSS metrics] + // (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) + // for more information). + MetricSource metric_source = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Specify one or more [available OSS metrics] + // (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) + // to collect for the metric course (for the `SPARK` metric source, any + // [Spark metric] + // (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be + // specified). + // + // Provide metrics in the following format: + // METRIC_SOURCE:INSTANCE:GROUP:METRIC + // Use camelcase as appropriate. + // + // Examples: + // + // ``` + // yarn:ResourceManager:QueueMetrics:AppsCompleted + // spark:driver:DAGScheduler:job.allJobs + // sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed + // hiveserver2:JVM:Memory:NonHeapMemoryUsage.used + // ``` + // + // Notes: + // + // * Only the specified overridden metrics will be collected for the + // metric source. For example, if one or more `spark:executive` metrics + // are listed as metric overrides, other `SPARK` metrics will not be + // collected. The collection of the default metrics for other OSS metric + // sources is unaffected. 
For example, if both `SPARK` and `YARN` metric + // sources are enabled, and overrides are provided for Spark metrics only, + // all default YARN metrics will be collected. + repeated string metric_overrides = 2 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Required. Metrics sources to enable. + repeated Metric metrics = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Contains cluster daemon metrics, such as HDFS and YARN stats. +// +// **Beta Feature**: This report is available for testing purposes only. It may +// be changed before final release. +message ClusterMetrics { + // The HDFS metrics. + map<string, string> hdfs_metrics = 1; + + // The YARN metrics. + map<string, string> yarn_metrics = 2; +} + +// A request to create a cluster. +message CreateClusterRequest { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster to create. + Cluster cluster = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A unique ID used to identify the request. If the server receives + // two + // [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s + // with the same id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional.
Failure action when primary worker creation fails. + FailureAction action_on_failed_primary_workers = 5 + [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to update a cluster. +message UpdateClusterRequest { + // Required. The ID of the Google Cloud Platform project the + // cluster belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 5 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster name. + string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The changes to the cluster. + Cluster cluster = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Timeout for graceful YARN decommissioning. Graceful + // decommissioning allows removing nodes from the cluster without + // interrupting jobs in progress. Timeout specifies how long to wait for jobs + // in progress to finish before forcefully removing nodes (and potentially + // interrupting jobs). Default timeout is 0 (for forceful decommission), and + // the maximum allowed timeout is 1 day. (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + // + // Only supported on Dataproc image versions 1.2 and higher. + google.protobuf.Duration graceful_decommission_timeout = 6 + [(google.api.field_behavior) = OPTIONAL]; + + // Required. Specifies the path, relative to `Cluster`, of + // the field to update.
For example, to change the number of workers + // in a cluster to 5, the `update_mask` parameter would be + // specified as `config.worker_config.num_instances`, + // and the `PATCH` request body would specify the new value, as follows: + // + // { + // "config":{ + // "workerConfig":{ + // "numInstances":"5" + // } + // } + // } + // Similarly, to change the number of preemptible workers in a cluster to 5, + // the `update_mask` parameter would be + // `config.secondary_worker_config.num_instances`, and the `PATCH` request + // body would be set as follows: + // + // { + // "config":{ + // "secondaryWorkerConfig":{ + // "numInstances":"5" + // } + // } + // } + // Note: Currently, only the following fields can be updated: + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + //
+ // <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+ // <td><strong><em>labels</em></strong></td><td>Update labels</td>
+ // <td><strong><em>config.worker_config.num_instances</em></strong></td><td>Resize primary worker group</td>
+ // <td><strong><em>config.secondary_worker_config.num_instances</em></strong></td><td>Resize secondary worker group</td>
+ // <td><strong><em>config.autoscaling_config.policy_uri</em></strong></td><td>Use, stop using, or + // change autoscaling policies</td>
+ google.protobuf.FieldMask update_mask = 4 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. A unique ID used to identify the request. If the server + // receives two + // [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s + // with the same id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 7 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to stop a cluster. +message StopClusterRequest { + // Required. The ID of the Google Cloud Platform project the + // cluster belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster name. + string cluster_name = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Specifying the `cluster_uuid` means the RPC will fail + // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + string cluster_uuid = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique ID used to identify the request. If the server + // receives two + // [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s + // with the same id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. 
+ // + // Recommendation: Set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to start a cluster. +message StartClusterRequest { + // Required. The ID of the Google Cloud Platform project the + // cluster belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster name. + string cluster_name = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Specifying the `cluster_uuid` means the RPC will fail + // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + string cluster_uuid = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique ID used to identify the request. If the server + // receives two + // [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s + // with the same id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. + // + // Recommendation: Set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to delete a cluster. +message DeleteClusterRequest { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. 
+ string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster name. + string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Specifying the `cluster_uuid` means the RPC should fail + // (with error NOT_FOUND) if cluster with specified UUID does not exist. + string cluster_uuid = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique ID used to identify the request. If the server + // receives two + // [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s + // with the same id, then the second request will be ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Request to get the resource representation for a cluster in a project. +message GetClusterRequest { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster name. + string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// A request to list the clusters in a project. +message ListClustersRequest { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. 
+ string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 4 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A filter constraining the clusters to list. Filters are + // case-sensitive and have the following syntax: + // + // field = value [AND [field = value]] ... + // + // where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, + // and `[KEY]` is a label key. **value** can be `*` to match all values. + // `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, + // `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` + // contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` + // contains the `DELETING` and `ERROR` states. + // `clusterName` is the name of the cluster provided at creation time. + // Only the logical `AND` operator is supported; space-separated items are + // treated as having an implicit `AND` operator. + // + // Example filter: + // + // status.state = ACTIVE AND clusterName = mycluster + // AND labels.env = staging AND labels.starred = * + string filter = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The standard List page size. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The standard List page token. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// The list of all clusters in a project. +message ListClustersResponse { + // Output only. The clusters in the project. + repeated Cluster clusters = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // `page_token` in a subsequent `ListClustersRequest`. + string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// A request to collect cluster diagnostic information. 
+message DiagnoseClusterRequest { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster name. + string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// The location of diagnostic output. +message DiagnoseClusterResults { + // Output only. The Cloud Storage URI of the diagnostic output. + // The output report is a plain text file with a summary of collected + // diagnostics. + string output_uri = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Reservation Affinity for consuming Zonal reservation. +message ReservationAffinity { + // Indicates whether to consume capacity from a reservation or not. + enum Type { + TYPE_UNSPECIFIED = 0; + + // Do not consume from any allocated capacity. + NO_RESERVATION = 1; + + // Consume any reservation available. + ANY_RESERVATION = 2; + + // Must consume from a specific reservation. Must specify key value fields + // for specifying the reservations. + SPECIFIC_RESERVATION = 3; + } + + // Optional. Type of reservation to consume + Type consume_reservation_type = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Corresponds to the label key of reservation resource. + string key = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Corresponds to the label values of reservation resource.
+ repeated string values = 3 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/jobs.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/jobs.proto new file mode 100644 index 00000000000..e9dcf9cc6b8 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/jobs.proto @@ -0,0 +1,951 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.dataproc.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; +option java_multiple_files = true; +option java_outer_classname = "JobsProto"; +option java_package = "com.google.cloud.dataproc.v1"; + +// The JobController provides methods to manage jobs. +service JobController { + option (google.api.default_host) = "dataproc.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Submits a job to a cluster. 
+ rpc SubmitJob(SubmitJobRequest) returns (Job) { + option (google.api.http) = { + post: "/v1/projects/{project_id}/regions/{region}/jobs:submit" + body: "*" + }; + option (google.api.method_signature) = "project_id,region,job"; + } + + // Submits job to a cluster. + rpc SubmitJobAsOperation(SubmitJobRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/projects/{project_id}/regions/{region}/jobs:submitAsOperation" + body: "*" + }; + option (google.api.method_signature) = "project_id, region, job"; + option (google.longrunning.operation_info) = { + response_type: "Job" + metadata_type: "JobMetadata" + }; + } + + // Gets the resource representation for a job in a project. + rpc GetJob(GetJobRequest) returns (Job) { + option (google.api.http) = { + get: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}" + }; + option (google.api.method_signature) = "project_id,region,job_id"; + } + + // Lists regions/{region}/jobs in a project. + rpc ListJobs(ListJobsRequest) returns (ListJobsResponse) { + option (google.api.http) = { + get: "/v1/projects/{project_id}/regions/{region}/jobs" + }; + option (google.api.method_signature) = "project_id,region"; + option (google.api.method_signature) = "project_id,region,filter"; + } + + // Updates a job in a project. + rpc UpdateJob(UpdateJobRequest) returns (Job) { + option (google.api.http) = { + patch: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}" + body: "job" + }; + } + + // Starts a job cancellation request. To access the job resource + // after cancellation, call + // [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) + // or + // [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). 
+ rpc CancelJob(CancelJobRequest) returns (Job) { + option (google.api.http) = { + post: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel" + body: "*" + }; + option (google.api.method_signature) = "project_id,region,job_id"; + } + + // Deletes the job from the project. If the job is active, the delete fails, + // and the response returns `FAILED_PRECONDITION`. + rpc DeleteJob(DeleteJobRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}" + }; + option (google.api.method_signature) = "project_id,region,job_id"; + } +} + +// The runtime logging config of the job. +message LoggingConfig { + // The Log4j level for job execution. When running an + // [Apache Hive](https://hive.apache.org/) job, Cloud + // Dataproc configures the Hive client to an equivalent verbosity level. + enum Level { + // Level is unspecified. Use default level for log4j. + LEVEL_UNSPECIFIED = 0; + + // Use ALL level for log4j. + ALL = 1; + + // Use TRACE level for log4j. + TRACE = 2; + + // Use DEBUG level for log4j. + DEBUG = 3; + + // Use INFO level for log4j. + INFO = 4; + + // Use WARN level for log4j. + WARN = 5; + + // Use ERROR level for log4j. + ERROR = 6; + + // Use FATAL level for log4j. + FATAL = 7; + + // Turn off log4j. + OFF = 8; + } + + // The per-package log levels for the driver. This may include + // "root" package name to configure rootLogger. + // Examples: + // 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + map driver_log_levels = 2; +} + +// A Dataproc job for running +// [Apache Hadoop +// MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) +// jobs on [Apache Hadoop +// YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). +message HadoopJob { + // Required. Indicates the location of the driver's main class. 
Specify + // either the jar file that contains the main class or the main class name. + // To specify both, add the jar file to `jar_file_uris`, and then specify + // the main class name in this property. + oneof driver { + // The HCFS URI of the jar file containing the main class. + // Examples: + // 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' + // 'hdfs:/tmp/test-samples/custom-wordcount.jar' + // 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' + string main_jar_file_uri = 1; + + // The name of the driver's main class. The jar file containing the class + // must be in the default CLASSPATH or specified in `jar_file_uris`. + string main_class = 2; + } + + // Optional. The arguments to pass to the driver. Do not + // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as + // job properties, since a collision may occur that causes an incorrect job + // submission. + repeated string args = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Jar file URIs to add to the CLASSPATHs of the + // Hadoop driver and tasks. + repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied + // to the working directory of Hadoop drivers and distributed tasks. Useful + // for naively parallel tasks. + repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of archives to be extracted in the working directory of + // Hadoop drivers and tasks. Supported file types: + // .jar, .tar, .tar.gz, .tgz, or .zip. + repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A mapping of property names to values, used to configure Hadoop. + // Properties that conflict with values set by the Dataproc API may be + // overwritten. Can include properties set in /etc/hadoop/conf/*-site and + // classes in user code. 
+ map properties = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The runtime log config for job execution. + LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL]; +} + +// A Dataproc job for running [Apache Spark](https://spark.apache.org/) +// applications on YARN. +message SparkJob { + // Required. The specification of the main method to call to drive the job. + // Specify either the jar file that contains the main class or the main class + // name. To pass both a main jar and a main class in that jar, add the jar to + // `CommonJob.jar_file_uris`, and then specify the main class name in + // `main_class`. + oneof driver { + // The HCFS URI of the jar file that contains the main class. + string main_jar_file_uri = 1; + + // The name of the driver's main class. The jar file that contains the class + // must be in the default CLASSPATH or specified in `jar_file_uris`. + string main_class = 2; + } + + // Optional. The arguments to pass to the driver. Do not include arguments, + // such as `--conf`, that can be set as job properties, since a collision may + // occur that causes an incorrect job submission. + repeated string args = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the + // Spark driver and tasks. + repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of files to be placed in the working directory of + // each executor. Useful for naively parallel tasks. + repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of archives to be extracted into the working directory + // of each executor. Supported file types: + // .jar, .tar, .tar.gz, .tgz, and .zip. + repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A mapping of property names to values, used to configure Spark. 
+ // Properties that conflict with values set by the Dataproc API may be + // overwritten. Can include properties set in + // /etc/spark/conf/spark-defaults.conf and classes in user code. + map properties = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The runtime log config for job execution. + LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL]; +} + +// A Dataproc job for running +// [Apache +// PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) +// applications on YARN. +message PySparkJob { + // Required. The HCFS URI of the main Python file to use as the driver. Must + // be a .py file. + string main_python_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The arguments to pass to the driver. Do not include arguments, + // such as `--conf`, that can be set as job properties, since a collision may + // occur that causes an incorrect job submission. + repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS file URIs of Python files to pass to the PySpark + // framework. Supported file types: .py, .egg, and .zip. + repeated string python_file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the + // Python driver and tasks. + repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of files to be placed in the working directory of + // each executor. Useful for naively parallel tasks. + repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of archives to be extracted into the working directory + // of each executor. Supported file types: + // .jar, .tar, .tar.gz, .tgz, and .zip. + repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A mapping of property names to values, used to configure PySpark. 
+  // Properties that conflict with values set by the Dataproc API may be
+  // overwritten. Can include properties set in
+  // /etc/spark/conf/spark-defaults.conf and classes in user code.
+  map<string, string> properties = 7 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A list of queries to run on a cluster.
+message QueryList {
+  // Required. The queries to execute. You do not need to end a query expression
+  // with a semicolon. Multiple queries can be specified in one
+  // string by separating each with a semicolon. Here is an example of a
+  // Dataproc API snippet that uses a QueryList to specify a HiveJob:
+  //
+  //     "hiveJob": {
+  //       "queryList": {
+  //         "queries": [
+  //           "query1",
+  //           "query2",
+  //           "query3;query4",
+  //         ]
+  //       }
+  //     }
+  repeated string queries = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// A Dataproc job for running [Apache Hive](https://hive.apache.org/)
+// queries on YARN.
+message HiveJob {
+  // Required. The sequence of Hive queries to execute, specified as either
+  // an HCFS file URI or a list of queries.
+  oneof queries {
+    // The HCFS URI of the script that contains Hive queries.
+    string query_file_uri = 1;
+
+    // A list of queries.
+    QueryList query_list = 2;
+  }
+
+  // Optional. Whether to continue executing queries if a query fails.
+  // The default value is `false`. Setting to `true` can be useful when
+  // executing independent parallel queries.
+  bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Mapping of query variable names to values (equivalent to the
+  // Hive command: `SET name="value";`).
+  map<string, string> script_variables = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A mapping of property names and values, used to configure Hive.
+  // Properties that conflict with values set by the Dataproc API may be
+  // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+  // /etc/hive/conf/hive-site.xml, and classes in user code.
+  map<string, string> properties = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of jar files to add to the CLASSPATH of the
+  // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
+  // and UDFs.
+  repeated string jar_file_uris = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A Dataproc job for running [Apache Spark
+// SQL](https://spark.apache.org/sql/) queries.
+message SparkSqlJob {
+  // Required. The sequence of Spark SQL queries to execute, specified as
+  // either an HCFS file URI or as a list of queries.
+  oneof queries {
+    // The HCFS URI of the script that contains SQL queries.
+    string query_file_uri = 1;
+
+    // A list of queries.
+    QueryList query_list = 2;
+  }
+
+  // Optional. Mapping of query variable names to values (equivalent to the
+  // Spark SQL command: SET `name="value";`).
+  map<string, string> script_variables = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A mapping of property names to values, used to configure
+  // Spark SQL's SparkConf. Properties that conflict with values set by the
+  // Dataproc API may be overwritten.
+  map<string, string> properties = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
+  repeated string jar_file_uris = 56 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A Dataproc job for running [Apache Pig](https://pig.apache.org/)
+// queries on YARN.
+message PigJob {
+  // Required. The sequence of Pig queries to execute, specified as an HCFS
+  // file URI or a list of queries.
+  oneof queries {
+    // The HCFS URI of the script that contains the Pig queries.
+    string query_file_uri = 1;
+
+    // A list of queries.
+    QueryList query_list = 2;
+  }
+
+  // Optional. Whether to continue executing queries if a query fails.
+  // The default value is `false`. Setting to `true` can be useful when
+  // executing independent parallel queries.
+  bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Mapping of query variable names to values (equivalent to the Pig
+  // command: `name=[value]`).
+  map<string, string> script_variables = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A mapping of property names to values, used to configure Pig.
+  // Properties that conflict with values set by the Dataproc API may be
+  // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+  // /etc/pig/conf/pig.properties, and classes in user code.
+  map<string, string> properties = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of jar files to add to the CLASSPATH of
+  // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
+  repeated string jar_file_uris = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A Dataproc job for running
+// [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html)
+// applications on YARN.
+message SparkRJob {
+  // Required. The HCFS URI of the main R file to use as the driver.
+  // Must be a .R file.
+  string main_r_file_uri = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The arguments to pass to the driver. Do not include arguments,
+  // such as `--conf`, that can be set as job properties, since a collision may
+  // occur that causes an incorrect job submission.
+  repeated string args = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of files to be placed in the working directory of
+  // each executor. Useful for naively parallel tasks.
+  repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of archives to be extracted into the working directory
+  // of each executor. Supported file types:
+  // .jar, .tar, .tar.gz, .tgz, and .zip.
+  repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A mapping of property names to values, used to configure SparkR.
+  // Properties that conflict with values set by the Dataproc API may be
+  // overwritten. Can include properties set in
+  // /etc/spark/conf/spark-defaults.conf and classes in user code.
+  map<string, string> properties = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A Dataproc job for running [Presto](https://prestosql.io/) queries.
+// **IMPORTANT**: The [Dataproc Presto Optional
+// Component](https://cloud.google.com/dataproc/docs/concepts/components/presto)
+// must be enabled when the cluster is created to submit a Presto job to the
+// cluster.
+message PrestoJob {
+  // Required. The sequence of Presto queries to execute, specified as
+  // either an HCFS file URI or as a list of queries.
+  oneof queries {
+    // The HCFS URI of the script that contains SQL queries.
+    string query_file_uri = 1;
+
+    // A list of queries.
+    QueryList query_list = 2;
+  }
+
+  // Optional. Whether to continue executing queries if a query fails.
+  // The default value is `false`. Setting to `true` can be useful when
+  // executing independent parallel queries.
+  bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The format in which query output will be displayed. See the
+  // Presto documentation for supported output formats
+  string output_format = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Presto client tags to attach to this query
+  repeated string client_tags = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A mapping of property names to values. Used to set Presto
+  // [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+  // Equivalent to using the --session flag in the Presto CLI
+  map<string, string> properties = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Dataproc job config.
+message JobPlacement {
+  // Required. The name of the cluster where the job will be submitted.
+  string cluster_name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. A cluster UUID generated by the Dataproc service when
+  // the job is submitted.
+  string cluster_uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Cluster labels to identify a cluster where the job will be
+  // submitted.
+  map<string, string> cluster_labels = 3
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Dataproc job status.
+message JobStatus {
+  // The job state.
+  enum State {
+    // The job state is unknown.
+    STATE_UNSPECIFIED = 0;
+
+    // The job is pending; it has been submitted, but is not yet running.
+    PENDING = 1;
+
+    // Job has been received by the service and completed initial setup;
+    // it will soon be submitted to the cluster.
+    SETUP_DONE = 8;
+
+    // The job is running on the cluster.
+    RUNNING = 2;
+
+    // A CancelJob request has been received, but is pending.
+    CANCEL_PENDING = 3;
+
+    // Transient in-flight resources have been canceled, and the request to
+    // cancel the running job has been issued to the cluster.
+    CANCEL_STARTED = 7;
+
+    // The job cancellation was successful.
+    CANCELLED = 4;
+
+    // The job has completed successfully.
+    DONE = 5;
+
+    // The job has completed, but encountered an error.
+    ERROR = 6;
+
+    // Job attempt has failed. The detail field contains failure details for
+    // this attempt.
+    //
+    // Applies to restartable jobs only.
+    ATTEMPT_FAILURE = 9;
+  }
+
+  // The job substate.
+  enum Substate {
+    // The job substate is unknown.
+ UNSPECIFIED = 0; + + // The Job is submitted to the agent. + // + // Applies to RUNNING state. + SUBMITTED = 1; + + // The Job has been received and is awaiting execution (it may be waiting + // for a condition to be met). See the "details" field for the reason for + // the delay. + // + // Applies to RUNNING state. + QUEUED = 2; + + // The agent-reported status is out of date, which may be caused by a + // loss of communication between the agent and Dataproc. If the + // agent does not send a timely update, the job will fail. + // + // Applies to RUNNING state. + STALE_STATUS = 3; + } + + // Output only. A state message specifying the overall job state. + State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Output only. Job state details, such as an error + // description if the state is ERROR. + string details = 2 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.field_behavior) = OPTIONAL + ]; + + // Output only. The time when this state was entered. + google.protobuf.Timestamp state_start_time = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Additional state information, which includes + // status reported by the agent. + Substate substate = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Encapsulates the full scoping used to reference a job. +message JobReference { + // Optional. The ID of the Google Cloud Platform project that the job belongs + // to. If specified, must match the request project ID. + string project_id = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The job ID, which must be unique within the project. + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), or hyphens (-). The maximum length is 100 characters. + // + // If not specified by the caller, the job ID will be provided by the server. + string job_id = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// A YARN application created by a job. 
Application information is a subset of +// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. +// +// **Beta Feature**: This report is available for testing purposes only. It may +// be changed before final release. +message YarnApplication { + // The application state, corresponding to + // YarnProtos.YarnApplicationStateProto. + enum State { + // Status is unspecified. + STATE_UNSPECIFIED = 0; + + // Status is NEW. + NEW = 1; + + // Status is NEW_SAVING. + NEW_SAVING = 2; + + // Status is SUBMITTED. + SUBMITTED = 3; + + // Status is ACCEPTED. + ACCEPTED = 4; + + // Status is RUNNING. + RUNNING = 5; + + // Status is FINISHED. + FINISHED = 6; + + // Status is FAILED. + FAILED = 7; + + // Status is KILLED. + KILLED = 8; + } + + // Required. The application name. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The application state. + State state = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The numerical progress of the application, from 1 to 100. + float progress = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or + // TimelineServer that provides application-specific information. The URL uses + // the internal hostname, and requires a proxy server for resolution and, + // possibly, access. + string tracking_url = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// A Dataproc job resource. +message Job { + // Optional. The fully qualified reference to the job, which can be used to + // obtain the equivalent REST path of the job resource. If this property + // is not specified when a job is created, the server generates a + // job_id. + JobReference reference = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Required. Job information, including how, when, and where to + // run the job. + JobPlacement placement = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The application/framework-specific portion of the job. 
+ oneof type_job { + // Optional. Job is a Hadoop job. + HadoopJob hadoop_job = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a Spark job. + SparkJob spark_job = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a PySpark job. + PySparkJob pyspark_job = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a Hive job. + HiveJob hive_job = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a Pig job. + PigJob pig_job = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a SparkR job. + SparkRJob spark_r_job = 21 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a SparkSql job. + SparkSqlJob spark_sql_job = 12 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a Presto job. + PrestoJob presto_job = 23 [(google.api.field_behavior) = OPTIONAL]; + } + + // Output only. The job status. Additional application-specific + // status information may be contained in the type_job + // and yarn_applications fields. + JobStatus status = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The previous job status. + repeated JobStatus status_history = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The collection of YARN applications spun up by this job. + // + // **Beta** Feature: This report is available for testing purposes only. It + // may be changed before final release. + repeated YarnApplication yarn_applications = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A URI pointing to the location of the stdout of the job's + // driver program. + string driver_output_resource_uri = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. If present, the location of miscellaneous control files + // which may be used as part of job setup and handling. If not present, + // control files may be placed in the same location as `driver_output_uri`. 
+  string driver_control_files_uri = 15
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. The labels to associate with this job.
+  // Label **keys** must contain 1 to 63 characters, and must conform to
+  // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+  // Label **values** may be empty, but, if present, must contain 1 to 63
+  // characters, and must conform to [RFC
+  // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+  // associated with a job.
+  map<string, string> labels = 18 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Job scheduling configuration.
+  JobScheduling scheduling = 20 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. A UUID that uniquely identifies a job within the project
+  // over time. This is in contrast to a user-settable reference.job_id that
+  // may be reused over time.
+  string job_uuid = 22 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Indicates whether the job is completed. If the value is
+  // `false`, the job is still in progress. If `true`, the job is completed, and
+  // `status.state` field will indicate if it was successful, failed,
+  // or cancelled.
+  bool done = 24 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Driver scheduling configuration.
+  DriverSchedulingConfig driver_scheduling_config = 27
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Driver scheduling configuration.
+message DriverSchedulingConfig {
+  // Required. The amount of memory in MB the driver is requesting.
+  int32 memory_mb = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The number of vCPUs the driver is requesting.
+  int32 vcores = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Job scheduling options.
+message JobScheduling {
+  // Optional. Maximum number of times per hour a driver may be restarted as
+  // a result of driver exiting with non-zero code before job is
+  // reported failed.
+ // + // A job may be reported as thrashing if the driver exits with a non-zero code + // four times within a 10-minute window. + // + // Maximum value is 10. + // + // **Note:** This restartable job option is not supported in Dataproc + // [workflow templates] + // (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + int32 max_failures_per_hour = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Maximum total number of times a driver may be restarted as a + // result of the driver exiting with a non-zero code. After the maximum number + // is reached, the job will be reported as failed. + // + // Maximum value is 240. + // + // **Note:** Currently, this restartable job option is + // not supported in Dataproc + // [workflow + // templates](https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). + int32 max_failures_total = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to submit a job. +message SubmitJobRequest { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The job resource. + Job job = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A unique id used to identify the request. If the server + // receives two + // [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s + // with the same id, then the second request will be ignored and the + // first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend + // is returned. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). 
+ // + // The id must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Job Operation metadata. +message JobMetadata { + // Output only. The job id. + string job_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Most recent job status. + JobStatus status = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Operation type. + string operation_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Job submission time. + google.protobuf.Timestamp start_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// A request to get the resource representation for a job in a project. +message GetJobRequest { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The job ID. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// A request to list jobs in a project. +message ListJobsRequest { + // A matcher that specifies categories of job states. + enum JobStateMatcher { + // Match all jobs, regardless of state. + ALL = 0; + + // Only match jobs in non-terminal states: PENDING, RUNNING, or + // CANCEL_PENDING. + ACTIVE = 1; + + // Only match jobs in terminal states: CANCELLED, DONE, or ERROR. + NON_ACTIVE = 2; + } + + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 6 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The number of results to return in each response. 
+ int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The page token, returned by a previous call, to request the + // next page of results. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set, the returned jobs list includes only jobs that were + // submitted to the named cluster. + string cluster_name = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies enumerated categories of jobs to list. + // (default = match ALL jobs). + // + // If `filter` is provided, `jobStateMatcher` will be ignored. + JobStateMatcher job_state_matcher = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A filter constraining the jobs to list. Filters are + // case-sensitive and have the following syntax: + // + // [field = value] AND [field [= value]] ... + // + // where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label + // key. **value** can be `*` to match all values. + // `status.state` can be either `ACTIVE` or `NON_ACTIVE`. + // Only the logical `AND` operator is supported; space-separated items are + // treated as having an implicit `AND` operator. + // + // Example filter: + // + // status.state = ACTIVE AND labels.env = staging AND labels.starred = * + string filter = 7 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to update a job. +message UpdateJobRequest { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The job ID. + string job_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The changes to the job. + Job job = 4 [(google.api.field_behavior) = REQUIRED]; + + // Required. Specifies the path, relative to Job, of + // the field to update. 
For example, to update the labels of a Job the + // update_mask parameter would be specified as + // labels, and the `PATCH` request body would specify the new + // value. Note: Currently, labels is the only + // field that can be updated. + google.protobuf.FieldMask update_mask = 5 + [(google.api.field_behavior) = REQUIRED]; +} + +// A list of jobs in a project. +message ListJobsResponse { + // Output only. Jobs list. + repeated Job jobs = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. This token is included in the response if there are more results + // to fetch. To fetch additional results, provide this value as the + // `page_token` in a subsequent ListJobsRequest. + string next_page_token = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to cancel a job. +message CancelJobRequest { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The job ID. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// A request to delete a job. +message DeleteJobRequest { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Dataproc region in which to handle the request. + string region = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The job ID. 
+ string job_id = 2 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/node_groups.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/node_groups.proto new file mode 100644 index 00000000000..0abc223820c --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/node_groups.proto @@ -0,0 +1,174 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.dataproc.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/dataproc/v1/clusters.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; +option java_multiple_files = true; +option java_outer_classname = "NodeGroupsProto"; +option java_package = "com.google.cloud.dataproc.v1"; +option (google.api.resource_definition) = { + type: "dataproc.googleapis.com/ClusterRegion" + pattern: "projects/{project}/regions/{region}/clusters/{cluster}" +}; + +// The `NodeGroupControllerService` provides methods to manage node groups +// of Compute Engine managed instances. 
+service NodeGroupController { + option (google.api.default_host) = "dataproc.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a node group in a cluster. The returned + // [Operation.metadata][google.longrunning.Operation.metadata] is + // [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata). + rpc CreateNodeGroup(CreateNodeGroupRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/regions/*/clusters/*}/nodeGroups" + body: "node_group" + }; + option (google.api.method_signature) = "parent,node_group,node_group_id"; + option (google.longrunning.operation_info) = { + response_type: "NodeGroup" + metadata_type: "google.cloud.dataproc.v1.NodeGroupOperationMetadata" + }; + } + + // Resizes a node group in a cluster. The returned + // [Operation.metadata][google.longrunning.Operation.metadata] is + // [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata). + rpc ResizeNodeGroup(ResizeNodeGroupRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{name=projects/*/regions/*/clusters/*/nodeGroups/*}:resize" + body: "*" + }; + option (google.api.method_signature) = "name,size"; + option (google.longrunning.operation_info) = { + response_type: "NodeGroup" + metadata_type: "google.cloud.dataproc.v1.NodeGroupOperationMetadata" + }; + } + + // Gets the resource representation for a node group in a + // cluster. + rpc GetNodeGroup(GetNodeGroupRequest) returns (NodeGroup) { + option (google.api.http) = { + get: "/v1/{name=projects/*/regions/*/clusters/*/nodeGroups/*}" + }; + option (google.api.method_signature) = "name"; + } +} + +// A request to create a node group. +message CreateNodeGroupRequest { + // Required. 
The parent resource where this node group will be created. + // Format: `projects/{project}/regions/{region}/clusters/{cluster}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "dataproc.googleapis.com/NodeGroup" + } + ]; + + // Required. The node group to create. + NodeGroup node_group = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. An optional node group ID. Generated if not specified. + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). Cannot begin or end with underscore + // or hyphen. Must consist of from 3 to 33 characters. + string node_group_id = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique ID used to identify the request. If the server receives + // two + // [CreateNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequests) + // with the same ID, the second request is ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. + // + // Recommendation: Set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to resize a node group. +message ResizeNodeGroupRequest { + // Required. The name of the node group to resize. + // Format: + // `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}` + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The number of running instances for the node group to maintain. + // The group adds or removes instances to maintain the number of instances + // specified by this parameter. 
+ int32 size = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A unique ID used to identify the request. If the server receives + // two + // [ResizeNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests) + // with the same ID, the second request is ignored and the + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. + // + // Recommendation: Set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Timeout for graceful YARN decommissioning. [Graceful + // decommissioning] + // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) + // allows the removal of nodes from the Compute Engine node group + // without interrupting jobs in progress. This timeout specifies how long to + // wait for jobs in progress to finish before forcefully removing nodes (and + // potentially interrupting jobs). Default timeout is 0 (for forceful + // decommission), and the maximum allowed timeout is 1 day. (see JSON + // representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + // + // Only supported on Dataproc image versions 1.2 and higher. + google.protobuf.Duration graceful_decommission_timeout = 4 + [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to get a node group . +message GetNodeGroupRequest { + // Required. The name of the node group to retrieve. 
+ // Format: + // `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "dataproc.googleapis.com/NodeGroup" + } + ]; +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/operations.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/operations.proto new file mode 100644 index 00000000000..854d9eda6ab --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/operations.proto @@ -0,0 +1,166 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.dataproc.v1; + +import "google/api/field_behavior.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; +option java_multiple_files = true; +option java_outer_classname = "OperationsProto"; +option java_package = "com.google.cloud.dataproc.v1"; + +// Metadata describing the Batch operation. +message BatchOperationMetadata { + // Operation type for Batch resources + enum BatchOperationType { + // Batch operation type is unknown. + BATCH_OPERATION_TYPE_UNSPECIFIED = 0; + + // Batch operation type. + BATCH = 1; + } + + // Name of the batch for the operation. + string batch = 1; + + // Batch UUID for the operation. 
+  string batch_uuid = 2;
+
+  // The time when the operation was created.
+  google.protobuf.Timestamp create_time = 3;
+
+  // The time when the operation finished.
+  google.protobuf.Timestamp done_time = 4;
+
+  // The operation type.
+  BatchOperationType operation_type = 6;
+
+  // Short description of the operation.
+  string description = 7;
+
+  // Labels associated with the operation.
+  map<string, string> labels = 8;
+
+  // Warnings encountered during operation execution.
+  repeated string warnings = 9;
+}
+
+// The status of the operation.
+message ClusterOperationStatus {
+  // The operation state.
+  enum State {
+    // Unused.
+    UNKNOWN = 0;
+
+    // The operation has been created.
+    PENDING = 1;
+
+    // The operation is running.
+    RUNNING = 2;
+
+    // The operation is done; either cancelled or completed.
+    DONE = 3;
+  }
+
+  // Output only. A message containing the operation state.
+  State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A message containing the detailed operation state.
+  string inner_state = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A message containing any operation metadata details.
+  string details = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The time this state was entered.
+  google.protobuf.Timestamp state_start_time = 4
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Metadata describing the operation.
+message ClusterOperationMetadata {
+  // Output only. Name of the cluster for the operation.
+  string cluster_name = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Cluster UUID for the operation.
+  string cluster_uuid = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Current operation status.
+  ClusterOperationStatus status = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The previous operation status.
+  repeated ClusterOperationStatus status_history = 10
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The operation type.
+  string operation_type = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Short description of operation.
+  string description = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Labels associated with the operation
+  map<string, string> labels = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Errors encountered during operation execution.
+  repeated string warnings = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Metadata describing the node group operation.
+message NodeGroupOperationMetadata {
+  // Operation type for node group resources.
+  enum NodeGroupOperationType {
+    // Node group operation type is unknown.
+    NODE_GROUP_OPERATION_TYPE_UNSPECIFIED = 0;
+
+    // Create node group operation type.
+    CREATE = 1;
+
+    // Update node group operation type.
+    UPDATE = 2;
+
+    // Delete node group operation type.
+    DELETE = 3;
+
+    // Resize node group operation type.
+    RESIZE = 4;
+  }
+
+  // Output only. Node group ID for the operation.
+  string node_group_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Cluster UUID associated with the node group operation.
+  string cluster_uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Current operation status.
+  ClusterOperationStatus status = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The previous operation status.
+  repeated ClusterOperationStatus status_history = 4
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The operation type.
+  NodeGroupOperationType operation_type = 5;
+
+  // Output only. Short description of operation.
+  string description = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Labels associated with the operation.
+  map<string, string> labels = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Errors encountered during operation execution.
+  repeated string warnings = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/shared.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/shared.proto
new file mode 100644
index 00000000000..18796915775
--- /dev/null
+++ b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/shared.proto
@@ -0,0 +1,341 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.dataproc.v1;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc";
+option java_multiple_files = true;
+option java_outer_classname = "SharedProto";
+option java_package = "com.google.cloud.dataproc.v1";
+
+// Runtime configuration for a workload.
+message RuntimeConfig {
+  // Optional. Version of the batch runtime.
+  string version = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Custom container image for the job runtime environment. If
+  // not specified, a default container image will be used.
+  string container_image = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A mapping of property names to values, which are used to configure workload
+  // execution.
+  map<string, string> properties = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Environment configuration for a workload.
+message EnvironmentConfig {
+  // Optional. Execution configuration for a workload.
+  ExecutionConfig execution_config = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Peripherals configuration that the workload has access to.
+  PeripheralsConfig peripherals_config = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Execution configuration for a workload.
+message ExecutionConfig {
+  // Optional. Service account that is used to execute the workload.
+  string service_account = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Network configuration for workload execution.
+  oneof network {
+    // Optional. Network URI to connect workload to.
+    string network_uri = 4 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Subnetwork URI to connect workload to.
+    string subnetwork_uri = 5 [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // Optional. Tags used for network traffic control.
+  repeated string network_tags = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The Cloud KMS key to use for encryption.
+  string kms_key = 7 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Spark History Server configuration for the workload.
+message SparkHistoryServerConfig {
+  // Optional. Resource name of an existing Dataproc Cluster to act as a Spark History
+  // Server for the workload.
+  //
+  // Example:
+  //
+  // * `projects/[project_id]/regions/[region]/clusters/[cluster_name]`
+  string dataproc_cluster = 1 [
+    (google.api.field_behavior) = OPTIONAL
+  ];
+}
+
+// Auxiliary services configuration for a workload.
+message PeripheralsConfig {
+  // Optional. Resource name of an existing Dataproc Metastore service.
+  //
+  // Example:
+  //
+  // * `projects/[project_id]/locations/[region]/services/[service_id]`
+  string metastore_service = 1 [
+    (google.api.field_behavior) = OPTIONAL
+  ];
+
+  // Optional. The Spark History Server configuration for the workload.
+ SparkHistoryServerConfig spark_history_server_config = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// Runtime information about workload execution. +message RuntimeInfo { + // Output only. Map of remote access endpoints (such as web interfaces and APIs) to their + // URIs. + map endpoints = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A URI pointing to the location of the stdout and stderr of the workload. + string output_uri = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A URI pointing to the location of the diagnostics tarball. + string diagnostic_output_uri = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The cluster's GKE config. +message GkeClusterConfig { + // Optional. A target GKE cluster to deploy to. It must be in the same project and + // region as the Dataproc cluster (the GKE cluster can be zonal or regional). + // Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' + string gke_cluster_target = 2 [ + (google.api.field_behavior) = OPTIONAL + ]; + + // Optional. GKE NodePools where workloads will be scheduled. At least one node pool + // must be assigned the 'default' role. Each role can be given to only a + // single NodePoolTarget. All NodePools must have the same location settings. + // If a nodePoolTarget is not specified, Dataproc constructs a default + // nodePoolTarget. + repeated GkeNodePoolTarget node_pool_target = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// The configuration for running the Dataproc cluster on Kubernetes. +message KubernetesClusterConfig { + // Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace + // does not exist, it is created. If it exists, Dataproc + // verifies that another Dataproc VirtualCluster is not installed + // into it. If not specified, the name of the Dataproc Cluster is used. + string kubernetes_namespace = 1 [(google.api.field_behavior) = OPTIONAL]; + + oneof config { + // Required. 
The configuration for running the Dataproc cluster on GKE. + GkeClusterConfig gke_cluster_config = 2 [(google.api.field_behavior) = REQUIRED]; + } + + // Optional. The software configuration for this Dataproc cluster running on Kubernetes. + KubernetesSoftwareConfig kubernetes_software_config = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// The software configuration for this Dataproc cluster running on Kubernetes. +message KubernetesSoftwareConfig { + // The components that should be installed in this Dataproc cluster. The key + // must be a string from the KubernetesComponent enumeration. The value is + // the version of the software to be installed. + // At least one entry must be specified. + map component_version = 1; + + // The properties to set on daemon config files. + // + // Property keys are specified in `prefix:property` format, for example + // `spark:spark.kubernetes.container.image`. The following are supported + // prefixes and their mappings: + // + // * spark: `spark-defaults.conf` + // + // For more information, see [Cluster + // properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties). + map properties = 2; +} + +// GKE NodePools that Dataproc workloads run on. +message GkeNodePoolTarget { + // `Role` specifies whose tasks will run on the NodePool. The roles can be + // specific to workloads. Exactly one GkeNodePoolTarget within the + // VirtualCluster must have 'default' role, which is used to run all workloads + // that are not associated with a NodePool. + enum Role { + // Role is unspecified. + ROLE_UNSPECIFIED = 0; + + // Any roles that are not directly assigned to a NodePool run on the + // `default` role's NodePool. + DEFAULT = 1; + + // Run controllers and webhooks. + CONTROLLER = 2; + + // Run spark driver. + SPARK_DRIVER = 3; + + // Run spark executors. + SPARK_EXECUTOR = 4; + } + + // Required. The target GKE NodePool. 
+  // Format:
+  // 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
+  string node_pool = 1 [
+    (google.api.field_behavior) = REQUIRED
+  ];
+
+  // Required. The types of role for a GKE NodePool.
+  repeated Role roles = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The configuration for the GKE NodePool.
+  //
+  // If specified, Dataproc attempts to create a NodePool with the
+  // specified shape. If one with the same name already exists, it is
+  // verified against all specified fields. If a field differs, the
+  // virtual cluster creation will fail.
+  //
+  // If omitted, any NodePool with the specified name is used. If a
+  // NodePool with the specified name does not exist, Dataproc creates a NodePool
+  // with default values.
+  GkeNodePoolConfig node_pool_config = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The configuration of a GKE NodePool used by a [Dataproc-on-GKE
+// cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+message GkeNodePoolConfig {
+  // Parameters that describe cluster nodes.
+  message GkeNodeConfig {
+    // Optional. The name of a Compute Engine [machine
+    // type](https://cloud.google.com/compute/docs/machine-types).
+    string machine_type = 1 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Whether the nodes are created as [preemptible VM
+    // instances](https://cloud.google.com/compute/docs/instances/preemptible).
+    bool preemptible = 10 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. The number of local SSD disks to attach to the node, which is limited by
+    // the maximum number of disks allowable per zone (see [Adding Local
+    // SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
+    int32 local_ssd_count = 7 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. A list of [hardware
+    // accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+    // each node.
+    repeated GkeNodePoolAcceleratorConfig accelerators = 11 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. [Minimum CPU
+    // platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+    // to be used by this instance. The instance may be scheduled on the
+    // specified or a newer CPU platform. Specify the friendly names of CPU
+    // platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
+    string min_cpu_platform = 13 [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // A GkeNodePoolAcceleratorConfig represents a Hardware Accelerator request
+  // for a NodePool.
+  message GkeNodePoolAcceleratorConfig {
+    // The number of accelerator cards exposed to an instance.
+    int64 accelerator_count = 1;
+
+    // The accelerator type resource name (see GPUs on Compute Engine).
+    string accelerator_type = 2;
+  }
+
+  // GkeNodePoolAutoscalingConfig contains information the cluster autoscaler needs to
+  // adjust the size of the node pool to the current cluster usage.
+  message GkeNodePoolAutoscalingConfig {
+    // The minimum number of nodes in the NodePool. Must be >= 0 and <=
+    // max_node_count.
+    int32 min_node_count = 2;
+
+    // The maximum number of nodes in the NodePool. Must be >= min_node_count.
+    // **Note:** Quota must be sufficient to scale up the cluster.
+    int32 max_node_count = 3;
+  }
+
+  // Optional. The node pool configuration.
+  GkeNodeConfig config = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The list of Compute Engine
+  // [zones](https://cloud.google.com/compute/docs/zones#available) where
+  // NodePool's nodes will be located.
+  //
+  // **Note:** Currently, only one zone may be specified.
+  //
+  // If a location is not specified during NodePool creation, Dataproc will
+  // choose a location.
+  repeated string locations = 13 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+  // only when a valid configuration is present.
+ GkeNodePoolAutoscalingConfig autoscaling = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Cluster components that can be activated. +enum Component { + // Unspecified component. Specifying this will cause Cluster creation to fail. + COMPONENT_UNSPECIFIED = 0; + + // The Anaconda python distribution. The Anaconda component is not supported + // in the Dataproc + // 2.0 + // image. The 2.0 image is pre-installed with Miniconda. + ANACONDA = 5; + + // Docker + DOCKER = 13; + + // The Druid query engine. (alpha) + DRUID = 9; + + // Flink + FLINK = 14; + + // HBase. (beta) + HBASE = 11; + + // The Hive Web HCatalog (the REST service for accessing HCatalog). + HIVE_WEBHCAT = 3; + + // The Jupyter Notebook. + JUPYTER = 1; + + // The Presto query engine. + PRESTO = 6; + + // The Ranger service. + RANGER = 12; + + // The Solr service. + SOLR = 10; + + // The Zeppelin notebook. + ZEPPELIN = 4; + + // The Zookeeper service. + ZOOKEEPER = 8; +} + +// Actions in response to failure of a resource associated with a cluster. +enum FailureAction { + // When FailureAction is unspecified, failure action defaults to NO_ACTION. + FAILURE_ACTION_UNSPECIFIED = 0; + + // Take no action on failure to create a cluster resource. NO_ACTION is the + // default. + NO_ACTION = 1; + + // Delete the failed cluster resource. + DELETE = 2; +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/workflow_templates.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/workflow_templates.proto new file mode 100644 index 00000000000..416ba26d03c --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/workflow_templates.proto @@ -0,0 +1,807 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.dataproc.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/dataproc/v1/clusters.proto"; +import "google/cloud/dataproc/v1/jobs.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; +option java_multiple_files = true; +option java_outer_classname = "WorkflowTemplatesProto"; +option java_package = "com.google.cloud.dataproc.v1"; + +// The API interface for managing Workflow Templates in the +// Dataproc API. +service WorkflowTemplateService { + option (google.api.default_host) = "dataproc.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Creates new workflow template. + rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) returns (WorkflowTemplate) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/locations/*}/workflowTemplates" + body: "template" + additional_bindings { + post: "/v1/{parent=projects/*/regions/*}/workflowTemplates" + body: "template" + } + }; + option (google.api.method_signature) = "parent,template"; + } + + // Retrieves the latest workflow template. + // + // Can retrieve previously instantiated template by specifying optional + // version parameter. 
+ rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) returns (WorkflowTemplate) { + option (google.api.http) = { + get: "/v1/{name=projects/*/locations/*/workflowTemplates/*}" + additional_bindings { + get: "/v1/{name=projects/*/regions/*/workflowTemplates/*}" + } + }; + option (google.api.method_signature) = "name"; + } + + // Instantiates a template and begins execution. + // + // The returned Operation can be used to track execution of + // workflow by polling + // [operations.get][google.longrunning.Operations.GetOperation]. + // The Operation will complete when entire workflow is finished. + // + // The running workflow can be aborted via + // [operations.cancel][google.longrunning.Operations.CancelOperation]. + // This will cause any inflight jobs to be cancelled and workflow-owned + // clusters to be deleted. + // + // The [Operation.metadata][google.longrunning.Operation.metadata] will be + // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + // Also see [Using + // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + // + // On successful completion, + // [Operation.response][google.longrunning.Operation.response] will be + // [Empty][google.protobuf.Empty]. + rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{name=projects/*/locations/*/workflowTemplates/*}:instantiate" + body: "*" + additional_bindings { + post: "/v1/{name=projects/*/regions/*/workflowTemplates/*}:instantiate" + body: "*" + } + }; + option (google.api.method_signature) = "name"; + option (google.api.method_signature) = "name,parameters"; + option (google.longrunning.operation_info) = { + response_type: "google.protobuf.Empty" + metadata_type: "WorkflowMetadata" + }; + } + + // Instantiates a template and begins execution. 
+ // + // This method is equivalent to executing the sequence + // [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], + // [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. + // + // The returned Operation can be used to track execution of + // workflow by polling + // [operations.get][google.longrunning.Operations.GetOperation]. + // The Operation will complete when entire workflow is finished. + // + // The running workflow can be aborted via + // [operations.cancel][google.longrunning.Operations.CancelOperation]. + // This will cause any inflight jobs to be cancelled and workflow-owned + // clusters to be deleted. + // + // The [Operation.metadata][google.longrunning.Operation.metadata] will be + // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + // Also see [Using + // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + // + // On successful completion, + // [Operation.response][google.longrunning.Operation.response] will be + // [Empty][google.protobuf.Empty]. + rpc InstantiateInlineWorkflowTemplate(InstantiateInlineWorkflowTemplateRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline" + body: "template" + additional_bindings { + post: "/v1/{parent=projects/*/regions/*}/workflowTemplates:instantiateInline" + body: "template" + } + }; + option (google.api.method_signature) = "parent,template"; + option (google.longrunning.operation_info) = { + response_type: "google.protobuf.Empty" + metadata_type: "WorkflowMetadata" + }; + } + + // Updates (replaces) workflow template. 
The updated template + // must contain version that matches the current server version. + rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) returns (WorkflowTemplate) { + option (google.api.http) = { + put: "/v1/{template.name=projects/*/locations/*/workflowTemplates/*}" + body: "template" + additional_bindings { + put: "/v1/{template.name=projects/*/regions/*/workflowTemplates/*}" + body: "template" + } + }; + option (google.api.method_signature) = "template"; + } + + // Lists workflows that match the specified filter in the request. + rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) returns (ListWorkflowTemplatesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/locations/*}/workflowTemplates" + additional_bindings { + get: "/v1/{parent=projects/*/regions/*}/workflowTemplates" + } + }; + option (google.api.method_signature) = "parent"; + } + + // Deletes a workflow template. It does not cancel in-progress workflows. + rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/locations/*/workflowTemplates/*}" + additional_bindings { + delete: "/v1/{name=projects/*/regions/*/workflowTemplates/*}" + } + }; + option (google.api.method_signature) = "name"; + } +} + +// A Dataproc workflow template resource. +message WorkflowTemplate { + option (google.api.resource) = { + type: "dataproc.googleapis.com/WorkflowTemplate" + pattern: "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}" + pattern: "projects/{project}/locations/{location}/workflowTemplates/{workflow_template}" + history: ORIGINALLY_SINGLE_PATTERN + }; + + string id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The resource name of the workflow template, as described + // in https://cloud.google.com/apis/design/resource_names. 
+ // + // * For `projects.regions.workflowTemplates`, the resource name of the + // template has the following format: + // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + // + // * For `projects.locations.workflowTemplates`, the resource name of the + // template has the following format: + // `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Used to perform a consistent read-modify-write. + // + // This field should be left blank for a `CreateWorkflowTemplate` request. It + // is required for an `UpdateWorkflowTemplate` request, and must match the + // current server version. A typical update template flow would fetch the + // current template with a `GetWorkflowTemplate` request, which will return + // the current template with the `version` field filled in with the + // current server version. The user updates other fields in the template, + // then returns it as part of the `UpdateWorkflowTemplate` request. + int32 version = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The time template was created. + google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time template was last updated. + google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The labels to associate with this template. These labels + // will be propagated to all jobs and clusters created by the workflow + // instance. + // + // Label **keys** must contain 1 to 63 characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // + // Label **values** may be empty, but, if present, must contain 1 to 63 + // characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // + // No more than 32 labels can be associated with a template. 
+ map labels = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Required. WorkflowTemplate scheduling information. + WorkflowTemplatePlacement placement = 7 [(google.api.field_behavior) = REQUIRED]; + + // Required. The Directed Acyclic Graph of Jobs to submit. + repeated OrderedJob jobs = 8 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Template parameters whose values are substituted into the + // template. Values for parameters must be provided when the template is + // instantiated. + repeated TemplateParameter parameters = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Timeout duration for the DAG of jobs, expressed in seconds (see + // [JSON representation of + // duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The timeout duration must be from 10 minutes ("600s") to 24 hours + // ("86400s"). The timer begins when the first job is submitted. If the + // workflow is running at the end of the timeout period, any remaining jobs + // are cancelled, the workflow is ended, and if the workflow was running on a + // [managed + // cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), + // the cluster is deleted. + google.protobuf.Duration dag_timeout = 10 [(google.api.field_behavior) = OPTIONAL]; +} + +// Specifies workflow execution target. +// +// Either `managed_cluster` or `cluster_selector` is required. +message WorkflowTemplatePlacement { + // Required. Specifies where workflow executes; either on a managed + // cluster or an existing cluster chosen by labels. + oneof placement { + // A cluster that is managed by the workflow. + ManagedCluster managed_cluster = 1; + + // Optional. A selector that chooses target cluster for jobs based + // on metadata. + // + // The selector is evaluated at the time each job is submitted. + ClusterSelector cluster_selector = 2; + } +} + +// Cluster that is managed by the workflow. +message ManagedCluster { + // Required. 
The cluster name prefix. A unique cluster name will be formed by + // appending a random suffix. + // + // The name must contain only lower-case letters (a-z), numbers (0-9), + // and hyphens (-). Must begin with a letter. Cannot begin or end with + // hyphen. Must consist of between 2 and 35 characters. + string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The cluster configuration. + ClusterConfig config = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The labels to associate with this cluster. + // + // Label keys must be between 1 and 63 characters long, and must conform to + // the following PCRE regular expression: + // [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + // + // Label values must be between 1 and 63 characters long, and must conform to + // the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + // + // No more than 32 labels can be associated with a given cluster. + map labels = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// A selector that chooses target cluster for jobs based on metadata. +message ClusterSelector { + // Optional. The zone where workflow process executes. This parameter does not + // affect the selection of the cluster. + // + // If unspecified, the zone of the first cluster matching the selector + // is used. + string zone = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Required. The cluster labels. Cluster must have all labels + // to match. + map cluster_labels = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// A job executed by the workflow. +message OrderedJob { + // Required. The step id. The id must be unique among all jobs + // within the template. + // + // The step id is used as prefix for job id, as job + // `goog-dataproc-workflow-step-id` label, and in + // [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other + // steps. 
+ // + // The id must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). Cannot begin or end with underscore + // or hyphen. Must consist of between 3 and 50 characters. + string step_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The job definition. + oneof job_type { + // Optional. Job is a Hadoop job. + HadoopJob hadoop_job = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a Spark job. + SparkJob spark_job = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a PySpark job. + PySparkJob pyspark_job = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a Hive job. + HiveJob hive_job = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a Pig job. + PigJob pig_job = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a SparkR job. + SparkRJob spark_r_job = 11 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a SparkSql job. + SparkSqlJob spark_sql_job = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a Presto job. + PrestoJob presto_job = 12 [(google.api.field_behavior) = OPTIONAL]; + } + + // Optional. The labels to associate with this job. + // + // Label keys must be between 1 and 63 characters long, and must conform to + // the following regular expression: + // [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + // + // Label values must be between 1 and 63 characters long, and must conform to + // the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + // + // No more than 32 labels can be associated with a given job. + map labels = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job scheduling configuration. + JobScheduling scheduling = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The optional list of prerequisite job step_ids. + // If not specified, the job will start at the beginning of workflow. 
+ repeated string prerequisite_step_ids = 10 [(google.api.field_behavior) = OPTIONAL]; +} + +// A configurable parameter that replaces one or more fields in the template. +// Parameterizable fields: +// - Labels +// - File uris +// - Job properties +// - Job arguments +// - Script variables +// - Main class (in HadoopJob and SparkJob) +// - Zone (in ClusterSelector) +message TemplateParameter { + // Required. Parameter name. + // The parameter name is used as the key, and paired with the + // parameter value, which are passed to the template when the template + // is instantiated. + // The name must contain only capital letters (A-Z), numbers (0-9), and + // underscores (_), and must not start with a number. The maximum length is + // 40 characters. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Paths to all fields that the parameter replaces. + // A field is allowed to appear in at most one parameter's list of field + // paths. + // + // A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask]. + // For example, a field path that references the zone field of a workflow + // template's cluster selector would be specified as + // `placement.clusterSelector.zone`. 
+ // + // Also, field paths can reference fields using the following syntax: + // + // * Values in maps can be referenced by key: + // * labels['key'] + // * placement.clusterSelector.clusterLabels['key'] + // * placement.managedCluster.labels['key'] + // * jobs['step-id'].labels['key'] + // + // * Jobs in the jobs list can be referenced by step-id: + // * jobs['step-id'].hadoopJob.mainJarFileUri + // * jobs['step-id'].hiveJob.queryFileUri + // * jobs['step-id'].pySparkJob.mainPythonFileUri + // * jobs['step-id'].hadoopJob.jarFileUris[0] + // * jobs['step-id'].hadoopJob.archiveUris[0] + // * jobs['step-id'].hadoopJob.fileUris[0] + // * jobs['step-id'].pySparkJob.pythonFileUris[0] + // + // * Items in repeated fields can be referenced by a zero-based index: + // * jobs['step-id'].sparkJob.args[0] + // + // * Other examples: + // * jobs['step-id'].hadoopJob.properties['key'] + // * jobs['step-id'].hadoopJob.args[0] + // * jobs['step-id'].hiveJob.scriptVariables['key'] + // * jobs['step-id'].hadoopJob.mainJarFileUri + // * placement.clusterSelector.zone + // + // It may not be possible to parameterize maps and repeated fields in their + // entirety since only individual map values and individual items in repeated + // fields can be referenced. For example, the following field paths are + // invalid: + // + // - placement.clusterSelector.clusterLabels + // - jobs['step-id'].sparkJob.args + repeated string fields = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Brief description of the parameter. + // Must not exceed 1024 characters. + string description = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Validation rules to be applied to this parameter's value. + ParameterValidation validation = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Configuration for parameter validation. +message ParameterValidation { + // Required. The type of validation to be performed. 
+ oneof validation_type { + // Validation based on regular expressions. + RegexValidation regex = 1; + + // Validation based on a list of allowed values. + ValueValidation values = 2; + } +} + +// Validation based on regular expressions. +message RegexValidation { + // Required. RE2 regular expressions used to validate the parameter's value. + // The value must match the regex in its entirety (substring + // matches are not sufficient). + repeated string regexes = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Validation based on a list of allowed values. +message ValueValidation { + // Required. List of allowed values for the parameter. + repeated string values = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Metadata describing the progress of a Dataproc workflow instantiation. +message WorkflowMetadata { + // The operation state. + enum State { + // Unused. + UNKNOWN = 0; + + // The operation has been created. + PENDING = 1; + + // The operation is running. + RUNNING = 2; + + // The operation is done; either cancelled or completed. + DONE = 3; + } + + // Output only. The resource name of the workflow template as described + // in https://cloud.google.com/apis/design/resource_names. + // + // * For `projects.regions.workflowTemplates`, the resource name of the + // template has the following format: + // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + // + // * For `projects.locations.workflowTemplates`, the resource name of the + // template has the following format: + // `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + string template = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The version of template at the time of + // workflow instantiation. + int32 version = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The create cluster operation metadata. + ClusterOperation create_cluster = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The workflow graph. 
+ WorkflowGraph graph = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The delete cluster operation metadata. + ClusterOperation delete_cluster = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The workflow state. + State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The name of the target cluster. + string cluster_name = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Map from parameter names to values that were used for those parameters. + map<string, string> parameters = 8; + + // Output only. Workflow start time. + google.protobuf.Timestamp start_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Workflow end time. + google.protobuf.Timestamp end_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The UUID of target cluster. + string cluster_uuid = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The timeout duration for the DAG of jobs, expressed in seconds (see + // [JSON representation of + // duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Duration dag_timeout = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. DAG start time, only set for workflows with [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when DAG + // begins. + google.protobuf.Timestamp dag_start_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. DAG end time, only set for workflows with [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when DAG ends. + google.protobuf.Timestamp dag_end_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The cluster operation triggered by a workflow. +message ClusterOperation { + // Output only. The id of the cluster operation. + string operation_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Error, if operation failed. 
+ string error = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Indicates the operation is done. + bool done = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The workflow graph. +message WorkflowGraph { + // Output only. The workflow nodes. + repeated WorkflowNode nodes = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The workflow node. +message WorkflowNode { + // The workflow node state. + enum NodeState { + // State is unspecified. + NODE_STATE_UNSPECIFIED = 0; + + // The node is awaiting prerequisite node to finish. + BLOCKED = 1; + + // The node is runnable but not running. + RUNNABLE = 2; + + // The node is running. + RUNNING = 3; + + // The node completed successfully. + COMPLETED = 4; + + // The node failed. A node can be marked FAILED because + // its ancestor or peer failed. + FAILED = 5; + } + + // Output only. The name of the node. + string step_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Node's prerequisite nodes. + repeated string prerequisite_step_ids = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The job id; populated after the node enters RUNNING state. + string job_id = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The node state. + NodeState state = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The error detail. + string error = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// A request to create a workflow template. +message CreateWorkflowTemplateRequest { + // Required. The resource name of the region or location, as described + // in https://cloud.google.com/apis/design/resource_names. 
+ // + // * For `projects.regions.workflowTemplates.create`, the resource name of the + // region has the following format: + // `projects/{project_id}/regions/{region}` + // + // * For `projects.locations.workflowTemplates.create`, the resource name of + // the location has the following format: + // `projects/{project_id}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "dataproc.googleapis.com/WorkflowTemplate" + } + ]; + + // Required. The Dataproc workflow template to create. + WorkflowTemplate template = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// A request to fetch a workflow template. +message GetWorkflowTemplateRequest { + // Required. The resource name of the workflow template, as described + // in https://cloud.google.com/apis/design/resource_names. + // + // * For `projects.regions.workflowTemplates.get`, the resource name of the + // template has the following format: + // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + // + // * For `projects.locations.workflowTemplates.get`, the resource name of the + // template has the following format: + // `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "dataproc.googleapis.com/WorkflowTemplate" + } + ]; + + // Optional. The version of workflow template to retrieve. Only previously + // instantiated versions can be retrieved. + // + // If unspecified, retrieves the current version. + int32 version = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to instantiate a workflow template. +message InstantiateWorkflowTemplateRequest { + // Required. The resource name of the workflow template, as described + // in https://cloud.google.com/apis/design/resource_names. 
+ // + // * For `projects.regions.workflowTemplates.instantiate`, the resource name + // of the template has the following format: + // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + // + // * For `projects.locations.workflowTemplates.instantiate`, the resource name + // of the template has the following format: + // `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "dataproc.googleapis.com/WorkflowTemplate" + } + ]; + + // Optional. The version of workflow template to instantiate. If specified, + // the workflow will be instantiated only if the current version of + // the workflow template has the supplied version. + // + // This option cannot be used to instantiate a previous version of + // workflow template. + int32 version = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A tag that prevents multiple concurrent workflow + // instances with the same tag from running. This mitigates risk of + // concurrent instances started due to retries. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The tag must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Map from parameter names to values that should be used for those + // parameters. Values may not exceed 1000 characters. + map<string, string> parameters = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to instantiate an inline workflow template. +message InstantiateInlineWorkflowTemplateRequest { + // Required. The resource name of the region or location, as described + // in https://cloud.google.com/apis/design/resource_names. 
+ // + // * For `projects.regions.workflowTemplates.instantiateinline`, the resource + // name of the region has the following format: + // `projects/{project_id}/regions/{region}` + // + // * For `projects.locations.workflowTemplates.instantiateinline`, the + // resource name of the location has the following format: + // `projects/{project_id}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "dataproc.googleapis.com/WorkflowTemplate" + } + ]; + + // Required. The workflow template to instantiate. + WorkflowTemplate template = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A tag that prevents multiple concurrent workflow + // instances with the same tag from running. This mitigates risk of + // concurrent instances started due to retries. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The tag must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. + string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// A request to update a workflow template. +message UpdateWorkflowTemplateRequest { + // Required. The updated workflow template. + // + // The `template.version` field must match the current version. + WorkflowTemplate template = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// A request to list workflow templates in a project. +message ListWorkflowTemplatesRequest { + // Required. The resource name of the region or location, as described + // in https://cloud.google.com/apis/design/resource_names. 
+ // + // * For `projects.regions.workflowTemplates.list`, the resource + // name of the region has the following format: + // `projects/{project_id}/regions/{region}` + // + // * For `projects.locations.workflowTemplates.list`, the + // resource name of the location has the following format: + // `projects/{project_id}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "dataproc.googleapis.com/WorkflowTemplate" + } + ]; + + // Optional. The maximum number of results to return in each response. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The page token, returned by a previous call, to request the + // next page of results. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// A response to a request to list workflow templates in a project. +message ListWorkflowTemplatesResponse { + // Output only. WorkflowTemplates list. + repeated WorkflowTemplate templates = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // page_token in a subsequent ListWorkflowTemplatesRequest. + string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// A request to delete a workflow template. +// +// Currently started workflows will remain running. +message DeleteWorkflowTemplateRequest { + // Required. The resource name of the workflow template, as described + // in https://cloud.google.com/apis/design/resource_names. 
+ // + // * For `projects.regions.workflowTemplates.delete`, the resource name + // of the template has the following format: + // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + // + // * For `projects.locations.workflowTemplates.delete`, the resource name + // of the template has the following format: + // `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "dataproc.googleapis.com/WorkflowTemplate" + } + ]; + + // Optional. The version of workflow template to delete. If specified, + // will only delete the template if the current server version matches + // specified version. + int32 version = 2 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.create_autoscaling_policy.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.create_autoscaling_policy.js new file mode 100644 index 00000000000..48edf447c8e --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.create_autoscaling_policy.js @@ -0,0 +1,73 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(parent, policy) { + // [START dataproc_v1_generated_AutoscalingPolicyService_CreateAutoscalingPolicy_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The "resource name" of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * * For `projects.regions.autoscalingPolicies.create`, the resource name + * of the region has the following format: + * `projects/{project_id}/regions/{region}` + * * For `projects.locations.autoscalingPolicies.create`, the resource name + * of the location has the following format: + * `projects/{project_id}/locations/{location}` + */ + // const parent = 'abc123' + /** + * Required. The autoscaling policy to create. 
+ */ + // const policy = {} + + // Imports the Dataproc library + const {AutoscalingPolicyServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new AutoscalingPolicyServiceClient(); + + async function callCreateAutoscalingPolicy() { + // Construct request + const request = { + parent, + policy, + }; + + // Run request + const response = await dataprocClient.createAutoscalingPolicy(request); + console.log(response); + } + + callCreateAutoscalingPolicy(); + // [END dataproc_v1_generated_AutoscalingPolicyService_CreateAutoscalingPolicy_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.delete_autoscaling_policy.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.delete_autoscaling_policy.js new file mode 100644 index 00000000000..3a3cbe273f1 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.delete_autoscaling_policy.js @@ -0,0 +1,68 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(name) { + // [START dataproc_v1_generated_AutoscalingPolicyService_DeleteAutoscalingPolicy_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The "resource name" of the autoscaling policy, as described + * in https://cloud.google.com/apis/design/resource_names. + * * For `projects.regions.autoscalingPolicies.delete`, the resource name + * of the policy has the following format: + * `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` + * * For `projects.locations.autoscalingPolicies.delete`, the resource name + * of the policy has the following format: + * `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` + */ + // const name = 'abc123' + + // Imports the Dataproc library + const {AutoscalingPolicyServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new AutoscalingPolicyServiceClient(); + + async function callDeleteAutoscalingPolicy() { + // Construct request + const request = { + name, + }; + + // Run request + const response = await dataprocClient.deleteAutoscalingPolicy(request); + console.log(response); + } + + callDeleteAutoscalingPolicy(); + // [END dataproc_v1_generated_AutoscalingPolicyService_DeleteAutoscalingPolicy_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.get_autoscaling_policy.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.get_autoscaling_policy.js new file mode 100644 index 
00000000000..a441db5a600 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.get_autoscaling_policy.js @@ -0,0 +1,68 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(name) { + // [START dataproc_v1_generated_AutoscalingPolicyService_GetAutoscalingPolicy_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The "resource name" of the autoscaling policy, as described + * in https://cloud.google.com/apis/design/resource_names. 
+ * * For `projects.regions.autoscalingPolicies.get`, the resource name + * of the policy has the following format: + * `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` + * * For `projects.locations.autoscalingPolicies.get`, the resource name + * of the policy has the following format: + * `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` + */ + // const name = 'abc123' + + // Imports the Dataproc library + const {AutoscalingPolicyServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new AutoscalingPolicyServiceClient(); + + async function callGetAutoscalingPolicy() { + // Construct request + const request = { + name, + }; + + // Run request + const response = await dataprocClient.getAutoscalingPolicy(request); + console.log(response); + } + + callGetAutoscalingPolicy(); + // [END dataproc_v1_generated_AutoscalingPolicyService_GetAutoscalingPolicy_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.list_autoscaling_policies.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.list_autoscaling_policies.js new file mode 100644 index 00000000000..95e066e414c --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.list_autoscaling_policies.js @@ -0,0 +1,80 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(parent) { + // [START dataproc_v1_generated_AutoscalingPolicyService_ListAutoscalingPolicies_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The "resource name" of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * * For `projects.regions.autoscalingPolicies.list`, the resource name + * of the region has the following format: + * `projects/{project_id}/regions/{region}` + * * For `projects.locations.autoscalingPolicies.list`, the resource name + * of the location has the following format: + * `projects/{project_id}/locations/{location}` + */ + // const parent = 'abc123' + /** + * Optional. The maximum number of results to return in each response. + * Must be less than or equal to 1000. Defaults to 100. + */ + // const pageSize = 1234 + /** + * Optional. The page token, returned by a previous call, to request the + * next page of results. 
+ */ + // const pageToken = 'abc123' + + // Imports the Dataproc library + const {AutoscalingPolicyServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new AutoscalingPolicyServiceClient(); + + async function callListAutoscalingPolicies() { + // Construct request + const request = { + parent, + }; + + // Run request + const iterable = await dataprocClient.listAutoscalingPoliciesAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListAutoscalingPolicies(); + // [END dataproc_v1_generated_AutoscalingPolicyService_ListAutoscalingPolicies_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.update_autoscaling_policy.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.update_autoscaling_policy.js new file mode 100644 index 00000000000..fe51b21bf7d --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.update_autoscaling_policy.js @@ -0,0 +1,61 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(policy) { + // [START dataproc_v1_generated_AutoscalingPolicyService_UpdateAutoscalingPolicy_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The updated autoscaling policy. + */ + // const policy = {} + + // Imports the Dataproc library + const {AutoscalingPolicyServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new AutoscalingPolicyServiceClient(); + + async function callUpdateAutoscalingPolicy() { + // Construct request + const request = { + policy, + }; + + // Run request + const response = await dataprocClient.updateAutoscalingPolicy(request); + console.log(response); + } + + callUpdateAutoscalingPolicy(); + // [END dataproc_v1_generated_AutoscalingPolicyService_UpdateAutoscalingPolicy_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.create_batch.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.create_batch.js new file mode 100644 index 00000000000..d87aff89595 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.create_batch.js @@ -0,0 +1,86 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(parent, batch) { + // [START dataproc_v1_generated_BatchController_CreateBatch_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The parent resource where this batch will be created. + */ + // const parent = 'abc123' + /** + * Required. The batch to create. + */ + // const batch = {} + /** + * Optional. The ID to use for the batch, which will become the final component of + * the batch's resource name. + * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`. + */ + // const batchId = 'abc123' + /** + * Optional. A unique ID used to identify the request. If the service + * receives two + * CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s + * with the same request_id, the second request is ignored and the + * Operation that corresponds to the first Batch created and stored + * in the backend is returned. + * Recommendation: Set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). 
+ * The value must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + */ + // const requestId = 'abc123' + + // Imports the Dataproc library + const {BatchControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new BatchControllerClient(); + + async function callCreateBatch() { + // Construct request + const request = { + parent, + batch, + }; + + // Run request + const [operation] = await dataprocClient.createBatch(request); + const [response] = await operation.promise(); + console.log(response); + } + + callCreateBatch(); + // [END dataproc_v1_generated_BatchController_CreateBatch_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.delete_batch.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.delete_batch.js new file mode 100644 index 00000000000..d2422e49e04 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.delete_batch.js @@ -0,0 +1,61 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(name) { + // [START dataproc_v1_generated_BatchController_DeleteBatch_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The name of the batch resource to delete. + */ + // const name = 'abc123' + + // Imports the Dataproc library + const {BatchControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new BatchControllerClient(); + + async function callDeleteBatch() { + // Construct request + const request = { + name, + }; + + // Run request + const response = await dataprocClient.deleteBatch(request); + console.log(response); + } + + callDeleteBatch(); + // [END dataproc_v1_generated_BatchController_DeleteBatch_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.get_batch.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.get_batch.js new file mode 100644 index 00000000000..f8e697e1b68 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.get_batch.js @@ -0,0 +1,61 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(name) { + // [START dataproc_v1_generated_BatchController_GetBatch_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The name of the batch to retrieve. 
+ */ + // const name = 'abc123' + + // Imports the Dataproc library + const {BatchControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new BatchControllerClient(); + + async function callGetBatch() { + // Construct request + const request = { + name, + }; + + // Run request + const response = await dataprocClient.getBatch(request); + console.log(response); + } + + callGetBatch(); + // [END dataproc_v1_generated_BatchController_GetBatch_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.list_batches.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.list_batches.js new file mode 100644 index 00000000000..1255756c4f7 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.list_batches.js @@ -0,0 +1,74 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(parent) { + // [START dataproc_v1_generated_BatchController_ListBatches_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The parent, which owns this collection of batches. + */ + // const parent = 'abc123' + /** + * Optional. The maximum number of batches to return in each response. + * The service may return fewer than this value. + * The default page size is 20; the maximum page size is 1000. + */ + // const pageSize = 1234 + /** + * Optional. A page token received from a previous `ListBatches` call. + * Provide this token to retrieve the subsequent page. + */ + // const pageToken = 'abc123' + + // Imports the Dataproc library + const {BatchControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new BatchControllerClient(); + + async function callListBatches() { + // Construct request + const request = { + parent, + }; + + // Run request + const iterable = await dataprocClient.listBatchesAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListBatches(); + // [END dataproc_v1_generated_BatchController_ListBatches_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.create_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.create_cluster.js new file mode 100644 index 00000000000..23d696ca9de --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.create_cluster.js @@ -0,0 +1,90 @@ 
+// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, region, cluster) { + // [START dataproc_v1_generated_ClusterController_CreateCluster_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The cluster to create. + */ + // const cluster = {} + /** + * Optional. A unique ID used to identify the request. If the server receives + * two + * CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s + * with the same id, then the second request will be ignored and the + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. 
+ * It is recommended to always set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + */ + // const requestId = 'abc123' + /** + * Optional. Failure action when primary worker creation fails. + */ + // const actionOnFailedPrimaryWorkers = {} + + // Imports the Dataproc library + const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new ClusterControllerClient(); + + async function callCreateCluster() { + // Construct request + const request = { + projectId, + region, + cluster, + }; + + // Run request + const [operation] = await dataprocClient.createCluster(request); + const [response] = await operation.promise(); + console.log(response); + } + + callCreateCluster(); + // [END dataproc_v1_generated_ClusterController_CreateCluster_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.delete_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.delete_cluster.js new file mode 100644 index 00000000000..0105fcb45ab --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.delete_cluster.js @@ -0,0 +1,91 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, region, clusterName) { + // [START dataproc_v1_generated_ClusterController_DeleteCluster_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The cluster name. + */ + // const clusterName = 'abc123' + /** + * Optional. Specifying the `cluster_uuid` means the RPC should fail + * (with error NOT_FOUND) if cluster with specified UUID does not exist. + */ + // const clusterUuid = 'abc123' + /** + * Optional. A unique ID used to identify the request. 
If the server + * receives two + * DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s + * with the same id, then the second request will be ignored and the + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. + * It is recommended to always set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + */ + // const requestId = 'abc123' + + // Imports the Dataproc library + const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new ClusterControllerClient(); + + async function callDeleteCluster() { + // Construct request + const request = { + projectId, + region, + clusterName, + }; + + // Run request + const [operation] = await dataprocClient.deleteCluster(request); + const [response] = await operation.promise(); + console.log(response); + } + + callDeleteCluster(); + // [END dataproc_v1_generated_ClusterController_DeleteCluster_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.diagnose_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.diagnose_cluster.js new file mode 100644 index 00000000000..4dc33eca599 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.diagnose_cluster.js @@ -0,0 +1,73 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, region, clusterName) { + // [START dataproc_v1_generated_ClusterController_DiagnoseCluster_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The cluster name. 
+ */ + // const clusterName = 'abc123' + + // Imports the Dataproc library + const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new ClusterControllerClient(); + + async function callDiagnoseCluster() { + // Construct request + const request = { + projectId, + region, + clusterName, + }; + + // Run request + const [operation] = await dataprocClient.diagnoseCluster(request); + const [response] = await operation.promise(); + console.log(response); + } + + callDiagnoseCluster(); + // [END dataproc_v1_generated_ClusterController_DiagnoseCluster_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.get_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.get_cluster.js new file mode 100644 index 00000000000..d2cb65227f8 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.get_cluster.js @@ -0,0 +1,72 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(projectId, region, clusterName) { + // [START dataproc_v1_generated_ClusterController_GetCluster_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The cluster name. + */ + // const clusterName = 'abc123' + + // Imports the Dataproc library + const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new ClusterControllerClient(); + + async function callGetCluster() { + // Construct request + const request = { + projectId, + region, + clusterName, + }; + + // Run request + const response = await dataprocClient.getCluster(request); + console.log(response); + } + + callGetCluster(); + // [END dataproc_v1_generated_ClusterController_GetCluster_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.list_clusters.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.list_clusters.js new file mode 100644 index 00000000000..aaadff9f99b --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.list_clusters.js @@ -0,0 +1,95 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, region) { + // [START dataproc_v1_generated_ClusterController_ListClusters_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Optional. A filter constraining the clusters to list. Filters are + * case-sensitive and have the following syntax: + * field = value AND field = value ... + * where **field** is one of `status.state`, `clusterName`, or `labels.KEY`, + * and `[KEY]` is a label key. **value** can be `*` to match all values. + * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, + * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` + * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` + * contains the `DELETING` and `ERROR` states. + * `clusterName` is the name of the cluster provided at creation time. 
+ * Only the logical `AND` operator is supported; space-separated items are + * treated as having an implicit `AND` operator. + * Example filter: + * status.state = ACTIVE AND clusterName = mycluster + * AND labels.env = staging AND labels.starred = * + */ + // const filter = 'abc123' + /** + * Optional. The standard List page size. + */ + // const pageSize = 1234 + /** + * Optional. The standard List page token. + */ + // const pageToken = 'abc123' + + // Imports the Dataproc library + const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new ClusterControllerClient(); + + async function callListClusters() { + // Construct request + const request = { + projectId, + region, + }; + + // Run request + const iterable = await dataprocClient.listClustersAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListClusters(); + // [END dataproc_v1_generated_ClusterController_ListClusters_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.start_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.start_cluster.js new file mode 100644 index 00000000000..b79b9108271 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.start_cluster.js @@ -0,0 +1,91 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, region, clusterName) { + // [START dataproc_v1_generated_ClusterController_StartCluster_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project the + * cluster belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The cluster name. + */ + // const clusterName = 'abc123' + /** + * Optional. Specifying the `cluster_uuid` means the RPC will fail + * (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + */ + // const clusterUuid = 'abc123' + /** + * Optional. A unique ID used to identify the request. 
If the server + * receives two + * StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s + * with the same id, then the second request will be ignored and the + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. + * Recommendation: Set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + */ + // const requestId = 'abc123' + + // Imports the Dataproc library + const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new ClusterControllerClient(); + + async function callStartCluster() { + // Construct request + const request = { + projectId, + region, + clusterName, + }; + + // Run request + const [operation] = await dataprocClient.startCluster(request); + const [response] = await operation.promise(); + console.log(response); + } + + callStartCluster(); + // [END dataproc_v1_generated_ClusterController_StartCluster_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.stop_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.stop_cluster.js new file mode 100644 index 00000000000..c3cf29a9dd3 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.stop_cluster.js @@ -0,0 +1,91 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, region, clusterName) { + // [START dataproc_v1_generated_ClusterController_StopCluster_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project the + * cluster belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The cluster name. + */ + // const clusterName = 'abc123' + /** + * Optional. Specifying the `cluster_uuid` means the RPC will fail + * (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + */ + // const clusterUuid = 'abc123' + /** + * Optional. A unique ID used to identify the request. 
If the server + * receives two + * StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s + * with the same id, then the second request will be ignored and the + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. + * Recommendation: Set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + */ + // const requestId = 'abc123' + + // Imports the Dataproc library + const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new ClusterControllerClient(); + + async function callStopCluster() { + // Construct request + const request = { + projectId, + region, + clusterName, + }; + + // Run request + const [operation] = await dataprocClient.stopCluster(request); + const [response] = await operation.promise(); + console.log(response); + } + + callStopCluster(); + // [END dataproc_v1_generated_ClusterController_StopCluster_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.update_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.update_cluster.js new file mode 100644 index 00000000000..13faa39caa3 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.update_cluster.js @@ -0,0 +1,154 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, region, clusterName, cluster, updateMask) { + // [START dataproc_v1_generated_ClusterController_UpdateCluster_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project the + * cluster belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The cluster name. + */ + // const clusterName = 'abc123' + /** + * Required. The changes to the cluster. + */ + // const cluster = {} + /** + * Optional. Timeout for graceful YARN decomissioning. Graceful + * decommissioning allows removing nodes from the cluster without + * interrupting jobs in progress. Timeout specifies how long to wait for jobs + * in progress to finish before forcefully removing nodes (and potentially + * interrupting jobs). Default timeout is 0 (for forceful decommission), and + * the maximum allowed timeout is 1 day. 
(see JSON representation of + * Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * Only supported on Dataproc image versions 1.2 and higher. + */ + // const gracefulDecommissionTimeout = {} + /** + * Required. Specifies the path, relative to `Cluster`, of + * the field to update. For example, to change the number of workers + * in a cluster to 5, the `update_mask` parameter would be + * specified as `config.worker_config.num_instances`, + * and the `PATCH` request body would specify the new value, as follows: + * { + * "config":{ + * "workerConfig":{ + * "numInstances":"5" + * } + * } + * } + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: + * { + * "config":{ + * "secondaryWorkerConfig":{ + * "numInstances":"5" + * } + * } + * } + * Note: Currently, only the following fields can be updated: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
MaskPurpose
labelsUpdate labels
config.worker_config.num_instancesResize primary worker group
config.secondary_worker_config.num_instancesResize secondary worker group
config.autoscaling_config.policy_uriUse, stop using, or + * change autoscaling policies
+ */ + // const updateMask = {} + /** + * Optional. A unique ID used to identify the request. If the server + * receives two + * UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s + * with the same id, then the second request will be ignored and the + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. + * It is recommended to always set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + */ + // const requestId = 'abc123' + + // Imports the Dataproc library + const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new ClusterControllerClient(); + + async function callUpdateCluster() { + // Construct request + const request = { + projectId, + region, + clusterName, + cluster, + updateMask, + }; + + // Run request + const [operation] = await dataprocClient.updateCluster(request); + const [response] = await operation.promise(); + console.log(response); + } + + callUpdateCluster(); + // [END dataproc_v1_generated_ClusterController_UpdateCluster_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.cancel_job.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.cancel_job.js new file mode 100644 index 00000000000..3ef3b314c53 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.cancel_job.js @@ -0,0 +1,72 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you 
may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, region, jobId) { + // [START dataproc_v1_generated_JobController_CancelJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The job ID. 
+ */ + // const jobId = 'abc123' + + // Imports the Dataproc library + const {JobControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new JobControllerClient(); + + async function callCancelJob() { + // Construct request + const request = { + projectId, + region, + jobId, + }; + + // Run request + const response = await dataprocClient.cancelJob(request); + console.log(response); + } + + callCancelJob(); + // [END dataproc_v1_generated_JobController_CancelJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.delete_job.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.delete_job.js new file mode 100644 index 00000000000..a77ccb12112 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.delete_job.js @@ -0,0 +1,72 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(projectId, region, jobId) { + // [START dataproc_v1_generated_JobController_DeleteJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The job ID. + */ + // const jobId = 'abc123' + + // Imports the Dataproc library + const {JobControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new JobControllerClient(); + + async function callDeleteJob() { + // Construct request + const request = { + projectId, + region, + jobId, + }; + + // Run request + const response = await dataprocClient.deleteJob(request); + console.log(response); + } + + callDeleteJob(); + // [END dataproc_v1_generated_JobController_DeleteJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.get_job.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.get_job.js new file mode 100644 index 00000000000..4f45a6ee8e6 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.get_job.js @@ -0,0 +1,72 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, region, jobId) { + // [START dataproc_v1_generated_JobController_GetJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The job ID. 
+ */ + // const jobId = 'abc123' + + // Imports the Dataproc library + const {JobControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new JobControllerClient(); + + async function callGetJob() { + // Construct request + const request = { + projectId, + region, + jobId, + }; + + // Run request + const response = await dataprocClient.getJob(request); + console.log(response); + } + + callGetJob(); + // [END dataproc_v1_generated_JobController_GetJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.list_jobs.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.list_jobs.js new file mode 100644 index 00000000000..7b746d83701 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.list_jobs.js @@ -0,0 +1,102 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(projectId, region) { + // [START dataproc_v1_generated_JobController_ListJobs_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Optional. The number of results to return in each response. + */ + // const pageSize = 1234 + /** + * Optional. The page token, returned by a previous call, to request the + * next page of results. + */ + // const pageToken = 'abc123' + /** + * Optional. If set, the returned jobs list includes only jobs that were + * submitted to the named cluster. + */ + // const clusterName = 'abc123' + /** + * Optional. Specifies enumerated categories of jobs to list. + * (default = match ALL jobs). + * If `filter` is provided, `jobStateMatcher` will be ignored. + */ + // const jobStateMatcher = {} + /** + * Optional. A filter constraining the jobs to list. Filters are + * case-sensitive and have the following syntax: + * field = value AND field = value ... + * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label + * key. **value** can be `*` to match all values. + * `status.state` can be either `ACTIVE` or `NON_ACTIVE`. + * Only the logical `AND` operator is supported; space-separated items are + * treated as having an implicit `AND` operator. 
+ * Example filter: + * status.state = ACTIVE AND labels.env = staging AND labels.starred = * + */ + // const filter = 'abc123' + + // Imports the Dataproc library + const {JobControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new JobControllerClient(); + + async function callListJobs() { + // Construct request + const request = { + projectId, + region, + }; + + // Run request + const iterable = await dataprocClient.listJobsAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListJobs(); + // [END dataproc_v1_generated_JobController_ListJobs_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job.js new file mode 100644 index 00000000000..10d9ecdabf9 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job.js @@ -0,0 +1,85 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(projectId, region, job) { + // [START dataproc_v1_generated_JobController_SubmitJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The job resource. + */ + // const job = {} + /** + * Optional. A unique id used to identify the request. If the server + * receives two + * SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s + * with the same id, then the second request will be ignored and the + * first Job google.cloud.dataproc.v1.Job created and stored in the backend + * is returned. + * It is recommended to always set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The id must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. 
+ */ + // const requestId = 'abc123' + + // Imports the Dataproc library + const {JobControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new JobControllerClient(); + + async function callSubmitJob() { + // Construct request + const request = { + projectId, + region, + job, + }; + + // Run request + const response = await dataprocClient.submitJob(request); + console.log(response); + } + + callSubmitJob(); + // [END dataproc_v1_generated_JobController_SubmitJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job_as_operation.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job_as_operation.js new file mode 100644 index 00000000000..9bf24c7d4cc --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job_as_operation.js @@ -0,0 +1,86 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(projectId, region, job) { + // [START dataproc_v1_generated_JobController_SubmitJobAsOperation_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The job resource. + */ + // const job = {} + /** + * Optional. A unique id used to identify the request. If the server + * receives two + * SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s + * with the same id, then the second request will be ignored and the + * first Job google.cloud.dataproc.v1.Job created and stored in the backend + * is returned. + * It is recommended to always set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The id must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. 
+ */ + // const requestId = 'abc123' + + // Imports the Dataproc library + const {JobControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new JobControllerClient(); + + async function callSubmitJobAsOperation() { + // Construct request + const request = { + projectId, + region, + job, + }; + + // Run request + const [operation] = await dataprocClient.submitJobAsOperation(request); + const [response] = await operation.promise(); + console.log(response); + } + + callSubmitJobAsOperation(); + // [END dataproc_v1_generated_JobController_SubmitJobAsOperation_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.update_job.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.update_job.js new file mode 100644 index 00000000000..6c8a77f8b39 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.update_job.js @@ -0,0 +1,87 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(projectId, region, jobId, job, updateMask) { + // [START dataproc_v1_generated_JobController_UpdateJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + */ + // const projectId = 'abc123' + /** + * Required. The Dataproc region in which to handle the request. + */ + // const region = 'us-central1' + /** + * Required. The job ID. + */ + // const jobId = 'abc123' + /** + * Required. The changes to the job. + */ + // const job = {} + /** + * Required. Specifies the path, relative to Job, of + * the field to update. For example, to update the labels of a Job the + * update_mask parameter would be specified as + * labels, and the `PATCH` request body would specify the new + * value. Note: Currently, labels is the only + * field that can be updated. 
+ */ + // const updateMask = {} + + // Imports the Dataproc library + const {JobControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new JobControllerClient(); + + async function callUpdateJob() { + // Construct request + const request = { + projectId, + region, + jobId, + job, + updateMask, + }; + + // Run request + const response = await dataprocClient.updateJob(request); + console.log(response); + } + + callUpdateJob(); + // [END dataproc_v1_generated_JobController_UpdateJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.create_node_group.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.create_node_group.js new file mode 100644 index 00000000000..d0543ee025c --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.create_node_group.js @@ -0,0 +1,88 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(parent, nodeGroup) { + // [START dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The parent resource where this node group will be created. + * Format: `projects/{project}/regions/{region}/clusters/{cluster}` + */ + // const parent = 'abc123' + /** + * Required. The node group to create. + */ + // const nodeGroup = {} + /** + * Optional. An optional node group ID. Generated if not specified. + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). Cannot begin or end with underscore + * or hyphen. Must consist of from 3 to 33 characters. + */ + // const nodeGroupId = 'abc123' + /** + * Optional. A unique ID used to identify the request. If the server receives + * two + * CreateNodeGroupRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequests) + * with the same ID, the second request is ignored and the + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. + * Recommendation: Set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. 
+ */ + // const requestId = 'abc123' + + // Imports the Dataproc library + const {NodeGroupControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new NodeGroupControllerClient(); + + async function callCreateNodeGroup() { + // Construct request + const request = { + parent, + nodeGroup, + }; + + // Run request + const [operation] = await dataprocClient.createNodeGroup(request); + const [response] = await operation.promise(); + console.log(response); + } + + callCreateNodeGroup(); + // [END dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.get_node_group.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.get_node_group.js new file mode 100644 index 00000000000..efddba65abd --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.get_node_group.js @@ -0,0 +1,63 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(name) { + // [START dataproc_v1_generated_NodeGroupController_GetNodeGroup_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The name of the node group to retrieve. + * Format: + * `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}` + */ + // const name = 'abc123' + + // Imports the Dataproc library + const {NodeGroupControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new NodeGroupControllerClient(); + + async function callGetNodeGroup() { + // Construct request + const request = { + name, + }; + + // Run request + const response = await dataprocClient.getNodeGroup(request); + console.log(response); + } + + callGetNodeGroup(); + // [END dataproc_v1_generated_NodeGroupController_GetNodeGroup_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.resize_node_group.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.resize_node_group.js new file mode 100644 index 00000000000..f9eff7e2ac5 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.resize_node_group.js @@ -0,0 +1,98 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(name, size) { + // [START dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The name of the node group to resize. + * Format: + * `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}` + */ + // const name = 'abc123' + /** + * Required. The number of running instances for the node group to maintain. + * The group adds or removes instances to maintain the number of instances + * specified by this parameter. + */ + // const size = 1234 + /** + * Optional. A unique ID used to identify the request. If the server receives + * two + * ResizeNodeGroupRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests) + * with the same ID, the second request is ignored and the + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. 
+ * Recommendation: Set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + */ + // const requestId = 'abc123' + /** + * Optional. Timeout for graceful YARN decommissioning. Graceful + * decommissioning + * (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) + * allows the removal of nodes from the Compute Engine node group + * without interrupting jobs in progress. This timeout specifies how long to + * wait for jobs in progress to finish before forcefully removing nodes (and + * potentially interrupting jobs). Default timeout is 0 (for forceful + * decommission), and the maximum allowed timeout is 1 day. (see JSON + * representation of + * Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * Only supported on Dataproc image versions 1.2 and higher. 
+ */ + // const gracefulDecommissionTimeout = {} + + // Imports the Dataproc library + const {NodeGroupControllerClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new NodeGroupControllerClient(); + + async function callResizeNodeGroup() { + // Construct request + const request = { + name, + size, + }; + + // Run request + const [operation] = await dataprocClient.resizeNodeGroup(request); + const [response] = await operation.promise(); + console.log(response); + } + + callResizeNodeGroup(); + // [END dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json new file mode 100644 index 00000000000..64e7abc2fb7 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json @@ -0,0 +1,1679 @@ +{ + "clientLibrary": { + "name": "nodejs-dataproc", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.dataproc.v1", + "version": "v1" + } + ] + }, + "snippets": [ + { + "regionTag": "dataproc_v1_generated_AutoscalingPolicyService_CreateAutoscalingPolicy_async", + "title": "dataproc createAutoscalingPolicy Sample", + "origin": "API_DEFINITION", + "description": " Creates new autoscaling policy.", + "canonical": true, + "file": "autoscaling_policy_service.create_autoscaling_policy.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 65, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateAutoscalingPolicy", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.CreateAutoscalingPolicy", + "async": true, + 
"parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "policy", + "type": ".google.cloud.dataproc.v1.AutoscalingPolicy" + } + ], + "resultType": ".google.cloud.dataproc.v1.AutoscalingPolicy", + "client": { + "shortName": "AutoscalingPolicyServiceClient", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyServiceClient" + }, + "method": { + "shortName": "CreateAutoscalingPolicy", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.CreateAutoscalingPolicy", + "service": { + "shortName": "AutoscalingPolicyService", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_AutoscalingPolicyService_UpdateAutoscalingPolicy_async", + "title": "dataproc updateAutoscalingPolicy Sample", + "origin": "API_DEFINITION", + "description": " Updates (replaces) autoscaling policy. Disabled check for update_mask, because all updates will be full replacements.", + "canonical": true, + "file": "autoscaling_policy_service.update_autoscaling_policy.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 53, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UpdateAutoscalingPolicy", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.UpdateAutoscalingPolicy", + "async": true, + "parameters": [ + { + "name": "policy", + "type": ".google.cloud.dataproc.v1.AutoscalingPolicy" + } + ], + "resultType": ".google.cloud.dataproc.v1.AutoscalingPolicy", + "client": { + "shortName": "AutoscalingPolicyServiceClient", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyServiceClient" + }, + "method": { + "shortName": "UpdateAutoscalingPolicy", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.UpdateAutoscalingPolicy", + "service": { + "shortName": "AutoscalingPolicyService", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService" + } + } + } + }, + { + "regionTag": 
"dataproc_v1_generated_AutoscalingPolicyService_GetAutoscalingPolicy_async", + "title": "dataproc getAutoscalingPolicy Sample", + "origin": "API_DEFINITION", + "description": " Retrieves autoscaling policy.", + "canonical": true, + "file": "autoscaling_policy_service.get_autoscaling_policy.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 60, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetAutoscalingPolicy", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.GetAutoscalingPolicy", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.AutoscalingPolicy", + "client": { + "shortName": "AutoscalingPolicyServiceClient", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyServiceClient" + }, + "method": { + "shortName": "GetAutoscalingPolicy", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.GetAutoscalingPolicy", + "service": { + "shortName": "AutoscalingPolicyService", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_AutoscalingPolicyService_ListAutoscalingPolicies_async", + "title": "dataproc listAutoscalingPolicies Sample", + "origin": "API_DEFINITION", + "description": " Lists autoscaling policies in the project.", + "canonical": true, + "file": "autoscaling_policy_service.list_autoscaling_policies.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 72, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListAutoscalingPolicies", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.ListAutoscalingPolicies", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "page_size", + "type": "TYPE_INT32" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.ListAutoscalingPoliciesResponse", + 
"client": { + "shortName": "AutoscalingPolicyServiceClient", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyServiceClient" + }, + "method": { + "shortName": "ListAutoscalingPolicies", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.ListAutoscalingPolicies", + "service": { + "shortName": "AutoscalingPolicyService", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_AutoscalingPolicyService_DeleteAutoscalingPolicy_async", + "title": "dataproc deleteAutoscalingPolicy Sample", + "origin": "API_DEFINITION", + "description": " Deletes an autoscaling policy. It is an error to delete an autoscaling policy that is in use by one or more clusters.", + "canonical": true, + "file": "autoscaling_policy_service.delete_autoscaling_policy.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 60, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteAutoscalingPolicy", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.DeleteAutoscalingPolicy", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "AutoscalingPolicyServiceClient", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyServiceClient" + }, + "method": { + "shortName": "DeleteAutoscalingPolicy", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.DeleteAutoscalingPolicy", + "service": { + "shortName": "AutoscalingPolicyService", + "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_BatchController_CreateBatch_async", + "title": "dataproc createBatch Sample", + "origin": "API_DEFINITION", + "description": " Creates a batch workload that executes asynchronously.", + "canonical": true, + "file": "batch_controller.create_batch.js", + "language": "JAVASCRIPT", + "segments": [ + { + 
"start": 25, + "end": 78, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateBatch", + "fullName": "google.cloud.dataproc.v1.BatchController.CreateBatch", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "batch", + "type": ".google.cloud.dataproc.v1.Batch" + }, + { + "name": "batch_id", + "type": "TYPE_STRING" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "BatchControllerClient", + "fullName": "google.cloud.dataproc.v1.BatchControllerClient" + }, + "method": { + "shortName": "CreateBatch", + "fullName": "google.cloud.dataproc.v1.BatchController.CreateBatch", + "service": { + "shortName": "BatchController", + "fullName": "google.cloud.dataproc.v1.BatchController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_BatchController_GetBatch_async", + "title": "dataproc getBatch Sample", + "origin": "API_DEFINITION", + "description": " Gets the batch workload resource representation.", + "canonical": true, + "file": "batch_controller.get_batch.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 53, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetBatch", + "fullName": "google.cloud.dataproc.v1.BatchController.GetBatch", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.Batch", + "client": { + "shortName": "BatchControllerClient", + "fullName": "google.cloud.dataproc.v1.BatchControllerClient" + }, + "method": { + "shortName": "GetBatch", + "fullName": "google.cloud.dataproc.v1.BatchController.GetBatch", + "service": { + "shortName": "BatchController", + "fullName": "google.cloud.dataproc.v1.BatchController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_BatchController_ListBatches_async", + "title": "dataproc listBatches Sample", + "origin": "API_DEFINITION", + 
"description": " Lists batch workloads.", + "canonical": true, + "file": "batch_controller.list_batches.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 66, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListBatches", + "fullName": "google.cloud.dataproc.v1.BatchController.ListBatches", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "page_size", + "type": "TYPE_INT32" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.ListBatchesResponse", + "client": { + "shortName": "BatchControllerClient", + "fullName": "google.cloud.dataproc.v1.BatchControllerClient" + }, + "method": { + "shortName": "ListBatches", + "fullName": "google.cloud.dataproc.v1.BatchController.ListBatches", + "service": { + "shortName": "BatchController", + "fullName": "google.cloud.dataproc.v1.BatchController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_BatchController_DeleteBatch_async", + "title": "dataproc deleteBatch Sample", + "origin": "API_DEFINITION", + "description": " Deletes the batch workload resource. 
If the batch is not in terminal state, the delete fails and the response returns `FAILED_PRECONDITION`.", + "canonical": true, + "file": "batch_controller.delete_batch.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 53, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteBatch", + "fullName": "google.cloud.dataproc.v1.BatchController.DeleteBatch", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "BatchControllerClient", + "fullName": "google.cloud.dataproc.v1.BatchControllerClient" + }, + "method": { + "shortName": "DeleteBatch", + "fullName": "google.cloud.dataproc.v1.BatchController.DeleteBatch", + "service": { + "shortName": "BatchController", + "fullName": "google.cloud.dataproc.v1.BatchController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_ClusterController_CreateCluster_async", + "title": "dataproc createCluster Sample", + "origin": "API_DEFINITION", + "description": " Creates a cluster in a project. 
The returned [Operation.metadata][google.longrunning.Operation.metadata] will be [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", + "canonical": true, + "file": "cluster_controller.create_cluster.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 82, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.CreateCluster", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "cluster", + "type": ".google.cloud.dataproc.v1.Cluster" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + }, + { + "name": "action_on_failed_primary_workers", + "type": ".google.cloud.dataproc.v1.FailureAction" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "ClusterControllerClient", + "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" + }, + "method": { + "shortName": "CreateCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.CreateCluster", + "service": { + "shortName": "ClusterController", + "fullName": "google.cloud.dataproc.v1.ClusterController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_ClusterController_UpdateCluster_async", + "title": "dataproc updateCluster Sample", + "origin": "API_DEFINITION", + "description": " Updates a cluster in a project. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). 
The cluster must be in a [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error is returned.", + "canonical": true, + "file": "cluster_controller.update_cluster.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 146, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UpdateCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.UpdateCluster", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "cluster_name", + "type": "TYPE_STRING" + }, + { + "name": "cluster", + "type": ".google.cloud.dataproc.v1.Cluster" + }, + { + "name": "graceful_decommission_timeout", + "type": ".google.protobuf.Duration" + }, + { + "name": "update_mask", + "type": ".google.protobuf.FieldMask" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "ClusterControllerClient", + "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" + }, + "method": { + "shortName": "UpdateCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.UpdateCluster", + "service": { + "shortName": "ClusterController", + "fullName": "google.cloud.dataproc.v1.ClusterController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_ClusterController_StopCluster_async", + "title": "dataproc stopCluster Sample", + "origin": "API_DEFINITION", + "description": " Stops a cluster in a project.", + "canonical": true, + "file": "cluster_controller.stop_cluster.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 83, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "StopCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.StopCluster", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": 
"cluster_name", + "type": "TYPE_STRING" + }, + { + "name": "cluster_uuid", + "type": "TYPE_STRING" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "ClusterControllerClient", + "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" + }, + "method": { + "shortName": "StopCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.StopCluster", + "service": { + "shortName": "ClusterController", + "fullName": "google.cloud.dataproc.v1.ClusterController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_ClusterController_StartCluster_async", + "title": "dataproc startCluster Sample", + "origin": "API_DEFINITION", + "description": " Starts a cluster in a project.", + "canonical": true, + "file": "cluster_controller.start_cluster.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 83, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "StartCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.StartCluster", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "cluster_name", + "type": "TYPE_STRING" + }, + { + "name": "cluster_uuid", + "type": "TYPE_STRING" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "ClusterControllerClient", + "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" + }, + "method": { + "shortName": "StartCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.StartCluster", + "service": { + "shortName": "ClusterController", + "fullName": "google.cloud.dataproc.v1.ClusterController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_ClusterController_DeleteCluster_async", + "title": "dataproc deleteCluster Sample", + "origin": "API_DEFINITION", + 
"description": " Deletes a cluster in a project. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", + "canonical": true, + "file": "cluster_controller.delete_cluster.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 83, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.DeleteCluster", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "cluster_name", + "type": "TYPE_STRING" + }, + { + "name": "cluster_uuid", + "type": "TYPE_STRING" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "ClusterControllerClient", + "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" + }, + "method": { + "shortName": "DeleteCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.DeleteCluster", + "service": { + "shortName": "ClusterController", + "fullName": "google.cloud.dataproc.v1.ClusterController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_ClusterController_GetCluster_async", + "title": "dataproc getCluster Sample", + "origin": "API_DEFINITION", + "description": " Gets the resource representation for a cluster in a project.", + "canonical": true, + "file": "cluster_controller.get_cluster.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.GetCluster", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": 
"cluster_name", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.Cluster", + "client": { + "shortName": "ClusterControllerClient", + "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" + }, + "method": { + "shortName": "GetCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.GetCluster", + "service": { + "shortName": "ClusterController", + "fullName": "google.cloud.dataproc.v1.ClusterController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_ClusterController_ListClusters_async", + "title": "dataproc listClusters Sample", + "origin": "API_DEFINITION", + "description": " Lists all regions/{region}/clusters in a project alphabetically.", + "canonical": true, + "file": "cluster_controller.list_clusters.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 87, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListClusters", + "fullName": "google.cloud.dataproc.v1.ClusterController.ListClusters", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "filter", + "type": "TYPE_STRING" + }, + { + "name": "page_size", + "type": "TYPE_INT32" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.ListClustersResponse", + "client": { + "shortName": "ClusterControllerClient", + "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" + }, + "method": { + "shortName": "ListClusters", + "fullName": "google.cloud.dataproc.v1.ClusterController.ListClusters", + "service": { + "shortName": "ClusterController", + "fullName": "google.cloud.dataproc.v1.ClusterController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_ClusterController_DiagnoseCluster_async", + "title": "dataproc diagnoseCluster Sample", + "origin": "API_DEFINITION", + "description": " Gets cluster diagnostic information. 
The returned [Operation.metadata][google.longrunning.Operation.metadata] will be [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). After the operation completes, [Operation.response][google.longrunning.Operation.response] contains [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).", + "canonical": true, + "file": "cluster_controller.diagnose_cluster.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 65, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DiagnoseCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.DiagnoseCluster", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "cluster_name", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "ClusterControllerClient", + "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" + }, + "method": { + "shortName": "DiagnoseCluster", + "fullName": "google.cloud.dataproc.v1.ClusterController.DiagnoseCluster", + "service": { + "shortName": "ClusterController", + "fullName": "google.cloud.dataproc.v1.ClusterController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_JobController_SubmitJob_async", + "title": "dataproc submitJob Sample", + "origin": "API_DEFINITION", + "description": " Submits a job to a cluster.", + "canonical": true, + "file": "job_controller.submit_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 77, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "SubmitJob", + "fullName": "google.cloud.dataproc.v1.JobController.SubmitJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, 
+ { + "name": "job", + "type": ".google.cloud.dataproc.v1.Job" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.Job", + "client": { + "shortName": "JobControllerClient", + "fullName": "google.cloud.dataproc.v1.JobControllerClient" + }, + "method": { + "shortName": "SubmitJob", + "fullName": "google.cloud.dataproc.v1.JobController.SubmitJob", + "service": { + "shortName": "JobController", + "fullName": "google.cloud.dataproc.v1.JobController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_JobController_SubmitJobAsOperation_async", + "title": "dataproc submitJobAsOperation Sample", + "origin": "API_DEFINITION", + "description": " Submits job to a cluster.", + "canonical": true, + "file": "job_controller.submit_job_as_operation.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 78, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "SubmitJobAsOperation", + "fullName": "google.cloud.dataproc.v1.JobController.SubmitJobAsOperation", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "job", + "type": ".google.cloud.dataproc.v1.Job" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "JobControllerClient", + "fullName": "google.cloud.dataproc.v1.JobControllerClient" + }, + "method": { + "shortName": "SubmitJobAsOperation", + "fullName": "google.cloud.dataproc.v1.JobController.SubmitJobAsOperation", + "service": { + "shortName": "JobController", + "fullName": "google.cloud.dataproc.v1.JobController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_JobController_GetJob_async", + "title": "dataproc getJob Sample", + "origin": "API_DEFINITION", + "description": " Gets the resource representation for a job in a project.", + "canonical": true, + "file": 
"job_controller.get_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetJob", + "fullName": "google.cloud.dataproc.v1.JobController.GetJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.Job", + "client": { + "shortName": "JobControllerClient", + "fullName": "google.cloud.dataproc.v1.JobControllerClient" + }, + "method": { + "shortName": "GetJob", + "fullName": "google.cloud.dataproc.v1.JobController.GetJob", + "service": { + "shortName": "JobController", + "fullName": "google.cloud.dataproc.v1.JobController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_JobController_ListJobs_async", + "title": "dataproc listJobs Sample", + "origin": "API_DEFINITION", + "description": " Lists regions/{region}/jobs in a project.", + "canonical": true, + "file": "job_controller.list_jobs.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 94, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListJobs", + "fullName": "google.cloud.dataproc.v1.JobController.ListJobs", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "page_size", + "type": "TYPE_INT32" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "cluster_name", + "type": "TYPE_STRING" + }, + { + "name": "job_state_matcher", + "type": ".google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher" + }, + { + "name": "filter", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.ListJobsResponse", + "client": { + "shortName": "JobControllerClient", + "fullName": "google.cloud.dataproc.v1.JobControllerClient" + }, + "method": { + 
"shortName": "ListJobs", + "fullName": "google.cloud.dataproc.v1.JobController.ListJobs", + "service": { + "shortName": "JobController", + "fullName": "google.cloud.dataproc.v1.JobController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_JobController_UpdateJob_async", + "title": "dataproc updateJob Sample", + "origin": "API_DEFINITION", + "description": " Updates a job in a project.", + "canonical": true, + "file": "job_controller.update_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 79, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UpdateJob", + "fullName": "google.cloud.dataproc.v1.JobController.UpdateJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + }, + { + "name": "job", + "type": ".google.cloud.dataproc.v1.Job" + }, + { + "name": "update_mask", + "type": ".google.protobuf.FieldMask" + } + ], + "resultType": ".google.cloud.dataproc.v1.Job", + "client": { + "shortName": "JobControllerClient", + "fullName": "google.cloud.dataproc.v1.JobControllerClient" + }, + "method": { + "shortName": "UpdateJob", + "fullName": "google.cloud.dataproc.v1.JobController.UpdateJob", + "service": { + "shortName": "JobController", + "fullName": "google.cloud.dataproc.v1.JobController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_JobController_CancelJob_async", + "title": "dataproc cancelJob Sample", + "origin": "API_DEFINITION", + "description": " Starts a job cancellation request. 
To access the job resource after cancellation, call [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).", + "canonical": true, + "file": "job_controller.cancel_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CancelJob", + "fullName": "google.cloud.dataproc.v1.JobController.CancelJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.Job", + "client": { + "shortName": "JobControllerClient", + "fullName": "google.cloud.dataproc.v1.JobControllerClient" + }, + "method": { + "shortName": "CancelJob", + "fullName": "google.cloud.dataproc.v1.JobController.CancelJob", + "service": { + "shortName": "JobController", + "fullName": "google.cloud.dataproc.v1.JobController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_JobController_DeleteJob_async", + "title": "dataproc deleteJob Sample", + "origin": "API_DEFINITION", + "description": " Deletes the job from the project. 
If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.", + "canonical": true, + "file": "job_controller.delete_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteJob", + "fullName": "google.cloud.dataproc.v1.JobController.DeleteJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "region", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "JobControllerClient", + "fullName": "google.cloud.dataproc.v1.JobControllerClient" + }, + "method": { + "shortName": "DeleteJob", + "fullName": "google.cloud.dataproc.v1.JobController.DeleteJob", + "service": { + "shortName": "JobController", + "fullName": "google.cloud.dataproc.v1.JobController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async", + "title": "dataproc createNodeGroup Sample", + "origin": "API_DEFINITION", + "description": " Creates a node group in a cluster. 
The returned [Operation.metadata][google.longrunning.Operation.metadata] is [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", + "canonical": true, + "file": "node_group_controller.create_node_group.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 80, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.CreateNodeGroup", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "node_group", + "type": ".google.cloud.dataproc.v1.NodeGroup" + }, + { + "name": "node_group_id", + "type": "TYPE_STRING" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "NodeGroupControllerClient", + "fullName": "google.cloud.dataproc.v1.NodeGroupControllerClient" + }, + "method": { + "shortName": "CreateNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.CreateNodeGroup", + "service": { + "shortName": "NodeGroupController", + "fullName": "google.cloud.dataproc.v1.NodeGroupController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async", + "title": "dataproc resizeNodeGroup Sample", + "origin": "API_DEFINITION", + "description": " Resizes a node group in a cluster. 
The returned [Operation.metadata][google.longrunning.Operation.metadata] is [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", + "canonical": true, + "file": "node_group_controller.resize_node_group.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 90, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ResizeNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.ResizeNodeGroup", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + }, + { + "name": "size", + "type": "TYPE_INT32" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + }, + { + "name": "graceful_decommission_timeout", + "type": ".google.protobuf.Duration" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "NodeGroupControllerClient", + "fullName": "google.cloud.dataproc.v1.NodeGroupControllerClient" + }, + "method": { + "shortName": "ResizeNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.ResizeNodeGroup", + "service": { + "shortName": "NodeGroupController", + "fullName": "google.cloud.dataproc.v1.NodeGroupController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_NodeGroupController_GetNodeGroup_async", + "title": "dataproc getNodeGroup Sample", + "origin": "API_DEFINITION", + "description": " Gets the resource representation for a node group in a cluster.", + "canonical": true, + "file": "node_group_controller.get_node_group.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 55, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.GetNodeGroup", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.NodeGroup", + "client": { + "shortName": "NodeGroupControllerClient", + 
"fullName": "google.cloud.dataproc.v1.NodeGroupControllerClient" + }, + "method": { + "shortName": "GetNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.GetNodeGroup", + "service": { + "shortName": "NodeGroupController", + "fullName": "google.cloud.dataproc.v1.NodeGroupController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_WorkflowTemplateService_CreateWorkflowTemplate_async", + "title": "dataproc createWorkflowTemplate Sample", + "origin": "API_DEFINITION", + "description": " Creates new workflow template.", + "canonical": true, + "file": "workflow_template_service.create_workflow_template.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 65, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "template", + "type": ".google.cloud.dataproc.v1.WorkflowTemplate" + } + ], + "resultType": ".google.cloud.dataproc.v1.WorkflowTemplate", + "client": { + "shortName": "WorkflowTemplateServiceClient", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" + }, + "method": { + "shortName": "CreateWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate", + "service": { + "shortName": "WorkflowTemplateService", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_WorkflowTemplateService_GetWorkflowTemplate_async", + "title": "dataproc getWorkflowTemplate Sample", + "origin": "API_DEFINITION", + "description": " Retrieves the latest workflow template. 
Can retrieve previously instantiated template by specifying optional version parameter.", + "canonical": true, + "file": "workflow_template_service.get_workflow_template.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 66, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.GetWorkflowTemplate", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + }, + { + "name": "version", + "type": "TYPE_INT32" + } + ], + "resultType": ".google.cloud.dataproc.v1.WorkflowTemplate", + "client": { + "shortName": "WorkflowTemplateServiceClient", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" + }, + "method": { + "shortName": "GetWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.GetWorkflowTemplate", + "service": { + "shortName": "WorkflowTemplateService", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_WorkflowTemplateService_InstantiateWorkflowTemplate_async", + "title": "dataproc instantiateWorkflowTemplate Sample", + "origin": "API_DEFINITION", + "description": " Instantiates a template and begins execution. The returned Operation can be used to track execution of workflow by polling [operations.get][google.longrunning.Operations.GetOperation]. The Operation will complete when entire workflow is finished. The running workflow can be aborted via [operations.cancel][google.longrunning.Operations.CancelOperation]. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted. The [Operation.metadata][google.longrunning.Operation.metadata] will be [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). 
Also see [Using WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). On successful completion, [Operation.response][google.longrunning.Operation.response] will be [Empty][google.protobuf.Empty].", + "canonical": true, + "file": "workflow_template_service.instantiate_workflow_template.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 84, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "InstantiateWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + }, + { + "name": "version", + "type": "TYPE_INT32" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + }, + { + "name": "parameters", + "type": "TYPE_MESSAGE[]" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "WorkflowTemplateServiceClient", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" + }, + "method": { + "shortName": "InstantiateWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate", + "service": { + "shortName": "WorkflowTemplateService", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_WorkflowTemplateService_InstantiateInlineWorkflowTemplate_async", + "title": "dataproc instantiateInlineWorkflowTemplate Sample", + "origin": "API_DEFINITION", + "description": " Instantiates a template and begins execution. This method is equivalent to executing the sequence [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. 
The returned Operation can be used to track execution of workflow by polling [operations.get][google.longrunning.Operations.GetOperation]. The Operation will complete when entire workflow is finished. The running workflow can be aborted via [operations.cancel][google.longrunning.Operations.CancelOperation]. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted. The [Operation.metadata][google.longrunning.Operation.metadata] will be [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see [Using WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). On successful completion, [Operation.response][google.longrunning.Operation.response] will be [Empty][google.protobuf.Empty].", + "canonical": true, + "file": "workflow_template_service.instantiate_inline_workflow_template.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 76, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "InstantiateInlineWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateInlineWorkflowTemplate", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "template", + "type": ".google.cloud.dataproc.v1.WorkflowTemplate" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "WorkflowTemplateServiceClient", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" + }, + "method": { + "shortName": "InstantiateInlineWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateInlineWorkflowTemplate", + "service": { + "shortName": "WorkflowTemplateService", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" + } + } + } + }, + { + "regionTag": 
"dataproc_v1_generated_WorkflowTemplateService_UpdateWorkflowTemplate_async", + "title": "dataproc updateWorkflowTemplate Sample", + "origin": "API_DEFINITION", + "description": " Updates (replaces) workflow template. The updated template must contain version that matches the current server version.", + "canonical": true, + "file": "workflow_template_service.update_workflow_template.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 54, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UpdateWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.UpdateWorkflowTemplate", + "async": true, + "parameters": [ + { + "name": "template", + "type": ".google.cloud.dataproc.v1.WorkflowTemplate" + } + ], + "resultType": ".google.cloud.dataproc.v1.WorkflowTemplate", + "client": { + "shortName": "WorkflowTemplateServiceClient", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" + }, + "method": { + "shortName": "UpdateWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.UpdateWorkflowTemplate", + "service": { + "shortName": "WorkflowTemplateService", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_WorkflowTemplateService_ListWorkflowTemplates_async", + "title": "dataproc listWorkflowTemplates Sample", + "origin": "API_DEFINITION", + "description": " Lists workflows that match the specified filter in the request.", + "canonical": true, + "file": "workflow_template_service.list_workflow_templates.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 71, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListWorkflowTemplates", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.ListWorkflowTemplates", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "page_size", + "type": "TYPE_INT32" + }, + { + 
"name": "page_token", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.ListWorkflowTemplatesResponse", + "client": { + "shortName": "WorkflowTemplateServiceClient", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" + }, + "method": { + "shortName": "ListWorkflowTemplates", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.ListWorkflowTemplates", + "service": { + "shortName": "WorkflowTemplateService", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_WorkflowTemplateService_DeleteWorkflowTemplate_async", + "title": "dataproc deleteWorkflowTemplate Sample", + "origin": "API_DEFINITION", + "description": " Deletes a workflow template. It does not cancel in-progress workflows.", + "canonical": true, + "file": "workflow_template_service.delete_workflow_template.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 66, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + }, + { + "name": "version", + "type": "TYPE_INT32" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "WorkflowTemplateServiceClient", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" + }, + "method": { + "shortName": "DeleteWorkflowTemplate", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate", + "service": { + "shortName": "WorkflowTemplateService", + "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" + } + } + } + } + ] +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.create_workflow_template.js 
b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.create_workflow_template.js new file mode 100644 index 00000000000..7f929bc072e --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.create_workflow_template.js @@ -0,0 +1,73 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(parent, template) { + // [START dataproc_v1_generated_WorkflowTemplateService_CreateWorkflowTemplate_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The resource name of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. 
+ * * For `projects.regions.workflowTemplates.create`, the resource name of the + * region has the following format: + * `projects/{project_id}/regions/{region}` + * * For `projects.locations.workflowTemplates.create`, the resource name of + * the location has the following format: + * `projects/{project_id}/locations/{location}` + */ + // const parent = 'abc123' + /** + * Required. The Dataproc workflow template to create. + */ + // const template = {} + + // Imports the Dataproc library + const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new WorkflowTemplateServiceClient(); + + async function callCreateWorkflowTemplate() { + // Construct request + const request = { + parent, + template, + }; + + // Run request + const response = await dataprocClient.createWorkflowTemplate(request); + console.log(response); + } + + callCreateWorkflowTemplate(); + // [END dataproc_v1_generated_WorkflowTemplateService_CreateWorkflowTemplate_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.delete_workflow_template.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.delete_workflow_template.js new file mode 100644 index 00000000000..5382da622d3 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.delete_workflow_template.js @@ -0,0 +1,74 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(name) { + // [START dataproc_v1_generated_WorkflowTemplateService_DeleteWorkflowTemplate_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The resource name of the workflow template, as described + * in https://cloud.google.com/apis/design/resource_names. + * * For `projects.regions.workflowTemplates.delete`, the resource name + * of the template has the following format: + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * * For `projects.locations.workflowTemplates.instantiate`, the resource name + * of the template has the following format: + * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + */ + // const name = 'abc123' + /** + * Optional. The version of workflow template to delete. If specified, + * will only delete the template if the current server version matches + * specified version. 
+ */ + // const version = 1234 + + // Imports the Dataproc library + const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new WorkflowTemplateServiceClient(); + + async function callDeleteWorkflowTemplate() { + // Construct request + const request = { + name, + }; + + // Run request + const response = await dataprocClient.deleteWorkflowTemplate(request); + console.log(response); + } + + callDeleteWorkflowTemplate(); + // [END dataproc_v1_generated_WorkflowTemplateService_DeleteWorkflowTemplate_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.get_workflow_template.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.get_workflow_template.js new file mode 100644 index 00000000000..e1aab5cc0fc --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.get_workflow_template.js @@ -0,0 +1,74 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(name) { + // [START dataproc_v1_generated_WorkflowTemplateService_GetWorkflowTemplate_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The resource name of the workflow template, as described + * in https://cloud.google.com/apis/design/resource_names. + * * For `projects.regions.workflowTemplates.get`, the resource name of the + * template has the following format: + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * * For `projects.locations.workflowTemplates.get`, the resource name of the + * template has the following format: + * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + */ + // const name = 'abc123' + /** + * Optional. The version of workflow template to retrieve. Only previously + * instantiated versions can be retrieved. + * If unspecified, retrieves the current version. 
+ */ + // const version = 1234 + + // Imports the Dataproc library + const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new WorkflowTemplateServiceClient(); + + async function callGetWorkflowTemplate() { + // Construct request + const request = { + name, + }; + + // Run request + const response = await dataprocClient.getWorkflowTemplate(request); + console.log(response); + } + + callGetWorkflowTemplate(); + // [END dataproc_v1_generated_WorkflowTemplateService_GetWorkflowTemplate_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js new file mode 100644 index 00000000000..e7d46fa688f --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js @@ -0,0 +1,84 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + + +'use strict'; + +function main(parent, template) { + // [START dataproc_v1_generated_WorkflowTemplateService_InstantiateInlineWorkflowTemplate_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The resource name of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * * For `projects.regions.workflowTemplates,instantiateinline`, the resource + * name of the region has the following format: + * `projects/{project_id}/regions/{region}` + * * For `projects.locations.workflowTemplates.instantiateinline`, the + * resource name of the location has the following format: + * `projects/{project_id}/locations/{location}` + */ + // const parent = 'abc123' + /** + * Required. The workflow template to instantiate. + */ + // const template = {} + /** + * Optional. A tag that prevents multiple concurrent workflow + * instances with the same tag from running. This mitigates risk of + * concurrent instances started due to retries. + * It is recommended to always set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The tag must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. 
+ */ + // const requestId = 'abc123' + + // Imports the Dataproc library + const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new WorkflowTemplateServiceClient(); + + async function callInstantiateInlineWorkflowTemplate() { + // Construct request + const request = { + parent, + template, + }; + + // Run request + const [operation] = await dataprocClient.instantiateInlineWorkflowTemplate(request); + const [response] = await operation.promise(); + console.log(response); + } + + callInstantiateInlineWorkflowTemplate(); + // [END dataproc_v1_generated_WorkflowTemplateService_InstantiateInlineWorkflowTemplate_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_workflow_template.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_workflow_template.js new file mode 100644 index 00000000000..cd4efec87e6 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_workflow_template.js @@ -0,0 +1,92 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(name) { + // [START dataproc_v1_generated_WorkflowTemplateService_InstantiateWorkflowTemplate_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The resource name of the workflow template, as described + * in https://cloud.google.com/apis/design/resource_names. + * * For `projects.regions.workflowTemplates.instantiate`, the resource name + * of the template has the following format: + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * * For `projects.locations.workflowTemplates.instantiate`, the resource name + * of the template has the following format: + * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + */ + // const name = 'abc123' + /** + * Optional. The version of workflow template to instantiate. If specified, + * the workflow will be instantiated only if the current version of + * the workflow template has the supplied version. + * This option cannot be used to instantiate a previous version of + * workflow template. + */ + // const version = 1234 + /** + * Optional. A tag that prevents multiple concurrent workflow + * instances with the same tag from running. This mitigates risk of + * concurrent instances started due to retries. + * It is recommended to always set this value to a + * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). + * The tag must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + */ + // const requestId = 'abc123' + /** + * Optional. 
Map from parameter names to values that should be used for those + * parameters. Values may not exceed 1000 characters. + */ + // const parameters = 1234 + + // Imports the Dataproc library + const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new WorkflowTemplateServiceClient(); + + async function callInstantiateWorkflowTemplate() { + // Construct request + const request = { + name, + }; + + // Run request + const [operation] = await dataprocClient.instantiateWorkflowTemplate(request); + const [response] = await operation.promise(); + console.log(response); + } + + callInstantiateWorkflowTemplate(); + // [END dataproc_v1_generated_WorkflowTemplateService_InstantiateWorkflowTemplate_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.list_workflow_templates.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.list_workflow_templates.js new file mode 100644 index 00000000000..31c6c24a4e6 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.list_workflow_templates.js @@ -0,0 +1,79 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(parent) { + // [START dataproc_v1_generated_WorkflowTemplateService_ListWorkflowTemplates_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The resource name of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * * For `projects.regions.workflowTemplates,list`, the resource + * name of the region has the following format: + * `projects/{project_id}/regions/{region}` + * * For `projects.locations.workflowTemplates.list`, the + * resource name of the location has the following format: + * `projects/{project_id}/locations/{location}` + */ + // const parent = 'abc123' + /** + * Optional. The maximum number of results to return in each response. + */ + // const pageSize = 1234 + /** + * Optional. The page token, returned by a previous call, to request the + * next page of results. 
+ */ + // const pageToken = 'abc123' + + // Imports the Dataproc library + const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new WorkflowTemplateServiceClient(); + + async function callListWorkflowTemplates() { + // Construct request + const request = { + parent, + }; + + // Run request + const iterable = await dataprocClient.listWorkflowTemplatesAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListWorkflowTemplates(); + // [END dataproc_v1_generated_WorkflowTemplateService_ListWorkflowTemplates_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.update_workflow_template.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.update_workflow_template.js new file mode 100644 index 00000000000..df1ef7ef62a --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.update_workflow_template.js @@ -0,0 +1,62 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(template) { + // [START dataproc_v1_generated_WorkflowTemplateService_UpdateWorkflowTemplate_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The updated workflow template. + * The `template.version` field must match the current version. + */ + // const template = {} + + // Imports the Dataproc library + const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; + + // Instantiates a client + const dataprocClient = new WorkflowTemplateServiceClient(); + + async function callUpdateWorkflowTemplate() { + // Construct request + const request = { + template, + }; + + // Run request + const response = await dataprocClient.updateWorkflowTemplate(request); + console.log(response); + } + + callUpdateWorkflowTemplate(); + // [END dataproc_v1_generated_WorkflowTemplateService_UpdateWorkflowTemplate_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/index.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/index.ts new file mode 100644 index 00000000000..3ef9be2ad8b --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/index.ts @@ -0,0 +1,35 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v1 from './v1'; +const AutoscalingPolicyServiceClient = v1.AutoscalingPolicyServiceClient; +type AutoscalingPolicyServiceClient = v1.AutoscalingPolicyServiceClient; +const BatchControllerClient = v1.BatchControllerClient; +type BatchControllerClient = v1.BatchControllerClient; +const ClusterControllerClient = v1.ClusterControllerClient; +type ClusterControllerClient = v1.ClusterControllerClient; +const JobControllerClient = v1.JobControllerClient; +type JobControllerClient = v1.JobControllerClient; +const NodeGroupControllerClient = v1.NodeGroupControllerClient; +type NodeGroupControllerClient = v1.NodeGroupControllerClient; +const WorkflowTemplateServiceClient = v1.WorkflowTemplateServiceClient; +type WorkflowTemplateServiceClient = v1.WorkflowTemplateServiceClient; +export {v1, AutoscalingPolicyServiceClient, BatchControllerClient, ClusterControllerClient, JobControllerClient, NodeGroupControllerClient, WorkflowTemplateServiceClient}; +export default {v1, AutoscalingPolicyServiceClient, BatchControllerClient, ClusterControllerClient, JobControllerClient, NodeGroupControllerClient, WorkflowTemplateServiceClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client.ts 
b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client.ts new file mode 100644 index 00000000000..54b02c1e947 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client.ts @@ -0,0 +1,1240 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1/autoscaling_policy_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './autoscaling_policy_service_client_config.json'; +const version = require('../../../package.json').version; + +/** + * The API interface for managing autoscaling policies in the + * Dataproc API. 
+ * @class + * @memberof v1 + */ +export class AutoscalingPolicyServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + pathTemplates: {[name: string]: gax.PathTemplate}; + autoscalingPolicyServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of AutoscalingPolicyServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. 
If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new AutoscalingPolicyServiceClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof AutoscalingPolicyServiceClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // Request numeric enum values if REST transport is used. 
+ opts.numericEnums = true; + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. 
+ this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. + this.pathTemplates = { + batchPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/batches/{batch}' + ), + locationPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}' + ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), + projectPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}' + ), + projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' + ), + projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' + ), + projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' + ), + projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' + ), + }; + + // Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listAutoscalingPolicies: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'policies') + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.dataproc.v1.AutoscalingPolicyService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.autoscalingPolicyServiceStub) { + return this.autoscalingPolicyServiceStub; + } + + // Put together the "service stub" for + // google.cloud.dataproc.v1.AutoscalingPolicyService. + this.autoscalingPolicyServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.AutoscalingPolicyService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.dataproc.v1.AutoscalingPolicyService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+ const autoscalingPolicyServiceStubMethods = + ['createAutoscalingPolicy', 'updateAutoscalingPolicy', 'getAutoscalingPolicy', 'listAutoscalingPolicies', 'deleteAutoscalingPolicy']; + for (const methodName of autoscalingPolicyServiceStubMethods) { + const callPromise = this.autoscalingPolicyServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.autoscalingPolicyServiceStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'dataproc.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'dataproc.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. 
+ */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- +/** + * Creates new autoscaling policy. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The "resource name" of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.autoscalingPolicies.create`, the resource name + * of the region has the following format: + * `projects/{project_id}/regions/{region}` + * + * * For `projects.locations.autoscalingPolicies.create`, the resource name + * of the location has the following format: + * `projects/{project_id}/locations/{location}` + * @param {google.cloud.dataproc.v1.AutoscalingPolicy} request.policy + * Required. The autoscaling policy to create. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/autoscaling_policy_service.create_autoscaling_policy.js + * region_tag:dataproc_v1_generated_AutoscalingPolicyService_CreateAutoscalingPolicy_async + */ + createAutoscalingPolicy( + request?: protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|undefined, {}|undefined + ]>; + createAutoscalingPolicy( + request: protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): void; + createAutoscalingPolicy( + request: protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): void; + createAutoscalingPolicy( + request?: protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = 
options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + this.initialize(); + return this.innerApiCalls.createAutoscalingPolicy(request, options, callback); + } +/** + * Updates (replaces) autoscaling policy. + * + * Disabled check for update_mask, because all updates will be full + * replacements. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.dataproc.v1.AutoscalingPolicy} request.policy + * Required. The updated autoscaling policy. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/autoscaling_policy_service.update_autoscaling_policy.js + * region_tag:dataproc_v1_generated_AutoscalingPolicyService_UpdateAutoscalingPolicy_async + */ + updateAutoscalingPolicy( + request?: protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|undefined, {}|undefined + ]>; + updateAutoscalingPolicy( + request: protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): void; + updateAutoscalingPolicy( + request: protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): void; + updateAutoscalingPolicy( + request?: protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = 
options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'policy.name': request.policy!.name ?? '', + }); + this.initialize(); + return this.innerApiCalls.updateAutoscalingPolicy(request, options, callback); + } +/** + * Retrieves autoscaling policy. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The "resource name" of the autoscaling policy, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.autoscalingPolicies.get`, the resource name + * of the policy has the following format: + * `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` + * + * * For `projects.locations.autoscalingPolicies.get`, the resource name + * of the policy has the following format: + * `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/autoscaling_policy_service.get_autoscaling_policy.js + * region_tag:dataproc_v1_generated_AutoscalingPolicyService_GetAutoscalingPolicy_async + */ + getAutoscalingPolicy( + request?: protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|undefined, {}|undefined + ]>; + getAutoscalingPolicy( + request: protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): void; + getAutoscalingPolicy( + request: protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): void; + getAutoscalingPolicy( + request?: protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IAutoscalingPolicy, + protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = 
options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'name': request.name ?? '', + }); + this.initialize(); + return this.innerApiCalls.getAutoscalingPolicy(request, options, callback); + } +/** + * Deletes an autoscaling policy. It is an error to delete an autoscaling + * policy that is in use by one or more clusters. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The "resource name" of the autoscaling policy, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.autoscalingPolicies.delete`, the resource name + * of the policy has the following format: + * `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` + * + * * For `projects.locations.autoscalingPolicies.delete`, the resource name + * of the policy has the following format: + * `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/autoscaling_policy_service.delete_autoscaling_policy.js + * region_tag:dataproc_v1_generated_AutoscalingPolicyService_DeleteAutoscalingPolicy_async + */ + deleteAutoscalingPolicy( + request?: protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|undefined, {}|undefined + ]>; + deleteAutoscalingPolicy( + request: protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): void; + deleteAutoscalingPolicy( + request: protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): void; + deleteAutoscalingPolicy( + request?: protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; 
+ options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'name': request.name ?? '', + }); + this.initialize(); + return this.innerApiCalls.deleteAutoscalingPolicy(request, options, callback); + } + + /** + * Lists autoscaling policies in the project. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The "resource name" of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.autoscalingPolicies.list`, the resource name + * of the region has the following format: + * `projects/{project_id}/regions/{region}` + * + * * For `projects.locations.autoscalingPolicies.list`, the resource name + * of the location has the following format: + * `projects/{project_id}/locations/{location}` + * @param {number} [request.pageSize] + * Optional. The maximum number of results to return in each response. + * Must be less than or equal to 1000. Defaults to 100. + * @param {string} [request.pageToken] + * Optional. The page token, returned by a previous call, to request the + * next page of results. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy}. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. + * We recommend using `listAutoscalingPoliciesAsync()` + * method described below for async iteration which you can stop as needed. 
+ * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. + */ + listAutoscalingPolicies( + request?: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IAutoscalingPolicy[], + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest|null, + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse + ]>; + listAutoscalingPolicies( + request: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, + options: CallOptions, + callback: PaginationCallback< + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IAutoscalingPolicy>): void; + listAutoscalingPolicies( + request: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, + callback: PaginationCallback< + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IAutoscalingPolicy>): void; + listAutoscalingPolicies( + request?: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, + optionsOrCallback?: CallOptions|PaginationCallback< + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IAutoscalingPolicy>, + callback?: PaginationCallback< + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IAutoscalingPolicy>): + Promise<[ + protos.google.cloud.dataproc.v1.IAutoscalingPolicy[], + protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest|null, + 
protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + this.initialize(); + return this.innerApiCalls.listAutoscalingPolicies(request, options, callback); + } + +/** + * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The "resource name" of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.autoscalingPolicies.list`, the resource name + * of the region has the following format: + * `projects/{project_id}/regions/{region}` + * + * * For `projects.locations.autoscalingPolicies.list`, the resource name + * of the location has the following format: + * `projects/{project_id}/locations/{location}` + * @param {number} [request.pageSize] + * Optional. The maximum number of results to return in each response. + * Must be less than or equal to 1000. Defaults to 100. + * @param {string} [request.pageToken] + * Optional. The page token, returned by a previous call, to request the + * next page of results. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. 
+ * @returns {Stream} + * An object stream which emits an object representing [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy} on 'data' event. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed. Note that it can affect your quota. + * We recommend using `listAutoscalingPoliciesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. + */ + listAutoscalingPoliciesStream( + request?: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, + options?: CallOptions): + Transform{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + const defaultCallSettings = this._defaults['listAutoscalingPolicies']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listAutoscalingPolicies.createStream( + this.innerApiCalls.listAutoscalingPolicies as GaxCall, + request, + callSettings + ); + } + +/** + * Equivalent to `listAutoscalingPolicies`, but returns an iterable object. + * + * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The "resource name" of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. 
+ * + * * For `projects.regions.autoscalingPolicies.list`, the resource name + * of the region has the following format: + * `projects/{project_id}/regions/{region}` + * + * * For `projects.locations.autoscalingPolicies.list`, the resource name + * of the location has the following format: + * `projects/{project_id}/locations/{location}` + * @param {number} [request.pageSize] + * Optional. The maximum number of results to return in each response. + * Must be less than or equal to 1000. Defaults to 100. + * @param {string} [request.pageToken] + * Optional. The page token, returned by a previous call, to request the + * next page of results. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Object} + * An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols). + * When you iterate the returned iterable, each element will be an object representing + * [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy}. The API will be called under the hood as needed, once per the page, + * so you can stop the iteration when you don't need more results. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. 
+ * @example include:samples/generated/v1/autoscaling_policy_service.list_autoscaling_policies.js + * region_tag:dataproc_v1_generated_AutoscalingPolicyService_ListAutoscalingPolicies_async + */ + listAutoscalingPoliciesAsync( + request?: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, + options?: CallOptions): + AsyncIterable{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + const defaultCallSettings = this._defaults['listAutoscalingPolicies']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listAutoscalingPolicies.asyncIterate( + this.innerApiCalls['listAutoscalingPolicies'] as GaxCall, + request as {}, + callSettings + ) as AsyncIterable; + } + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified batch resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} batch + * @returns {string} Resource name string. + */ + batchPath(project:string,location:string,batch:string) { + return this.pathTemplates.batchPathTemplate.render({ + project: project, + location: location, + batch: batch, + }); + } + + /** + * Parse the project from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the project. + */ + matchProjectFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).project; + } + + /** + * Parse the location from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the location. 
+ */ + matchLocationFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).location; + } + + /** + * Parse the batch from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the batch. + */ + matchBatchFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).batch; + } + + /** + * Return a fully-qualified location resource name string. + * + * @param {string} project + * @param {string} location + * @returns {string} Resource name string. + */ + locationPath(project:string,location:string) { + return this.pathTemplates.locationPathTemplate.render({ + project: project, + location: location, + }); + } + + /** + * Parse the project from Location resource. + * + * @param {string} locationName + * A fully-qualified path representing Location resource. + * @returns {string} A string representing the project. + */ + matchProjectFromLocationName(locationName: string) { + return this.pathTemplates.locationPathTemplate.match(locationName).project; + } + + /** + * Parse the location from Location resource. + * + * @param {string} locationName + * A fully-qualified path representing Location resource. + * @returns {string} A string representing the location. + */ + matchLocationFromLocationName(locationName: string) { + return this.pathTemplates.locationPathTemplate.match(locationName).location; + } + + /** + * Return a fully-qualified nodeGroup resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. 
+ */ + nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. + */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; + } + + /** + * Parse the region from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the cluster. + */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; + } + + /** + * Return a fully-qualified project resource name string. + * + * @param {string} project + * @returns {string} Resource name string. + */ + projectPath(project:string) { + return this.pathTemplates.projectPathTemplate.render({ + project: project, + }); + } + + /** + * Parse the project from Project resource. 
+ * + * @param {string} projectName + * A fully-qualified path representing Project resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectName(projectName: string) { + return this.pathTemplates.projectPathTemplate.match(projectName).project; + } + + /** + * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ + project: project, + location: location, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; + } + + /** + * Parse the location from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the location. + */ + matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; + } + + /** + * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. 
+ * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectLocationWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ + project: project, + location: location, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; + } + + /** + * Parse the location from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the location. 
+ */ + matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; + } + + /** + * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; + } + + /** + * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ + project: project, + region: region, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; + } + + /** + * Parse the region from ProjectRegionAutoscalingPolicy resource. 
+ * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; + } + + /** + * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectRegionWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ + project: project, + region: region, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the project. 
+ */ + matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; + } + + /** + * Parse the region from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; + } + + /** + * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+ */ + close(): Promise { + if (this.autoscalingPolicyServiceStub && !this._terminated) { + return this.autoscalingPolicyServiceStub.then(stub => { + this._terminated = true; + stub.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client_config.json new file mode 100644 index 00000000000..09bd892268f --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client_config.json @@ -0,0 +1,51 @@ +{ + "interfaces": { + "google.cloud.dataproc.v1.AutoscalingPolicyService": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "CreateAutoscalingPolicy": { + "timeout_millis": 600000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "UpdateAutoscalingPolicy": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "GetAutoscalingPolicy": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "ListAutoscalingPolicies": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "DeleteAutoscalingPolicy": { + "timeout_millis": 600000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_proto_list.json new 
file mode 100644 index 00000000000..3bb7ccf055a --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_proto_list.json @@ -0,0 +1,10 @@ +[ + "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", + "../../protos/google/cloud/dataproc/v1/batches.proto", + "../../protos/google/cloud/dataproc/v1/clusters.proto", + "../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", + "../../protos/google/cloud/dataproc/v1/operations.proto", + "../../protos/google/cloud/dataproc/v1/shared.proto", + "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" +] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client.ts new file mode 100644 index 00000000000..f172176f692 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client.ts @@ -0,0 +1,1183 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1/batch_controller_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './batch_controller_client_config.json'; +const version = require('../../../package.json').version; + +/** + * The BatchController provides methods to manage batch workloads. + * @class + * @memberof v1 + */ +export class BatchControllerClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + pathTemplates: {[name: string]: gax.PathTemplate}; + operationsClient: gax.OperationsClient; + batchControllerStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of BatchControllerClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. 
+ * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. 
Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new BatchControllerClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof BatchControllerClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // Request numeric enum values if REST transport is used. + opts.numericEnums = true; + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. 
+ this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. 
+ this.pathTemplates = { + batchPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/batches/{batch}' + ), + locationPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}' + ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), + projectPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}' + ), + projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' + ), + projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' + ), + projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' + ), + projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' + ), + }; + + // Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listBatches: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'batches') + }; + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [{selector: 'google.iam.v1.IAMPolicy.GetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy',body: '*',}], + },{selector: 'google.iam.v1.IAMPolicy.SetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy',body: '*',}], + },{selector: 'google.iam.v1.IAMPolicy.TestIamPermissions',post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: 
'/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions',body: '*',}], + },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/regions/*/operations/*}:cancel',},{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/regions/*/operations}',}]; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const createBatchResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.Batch') as gax.protobuf.Type; + const createBatchMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.BatchOperationMetadata') as gax.protobuf.Type; + + this.descriptors.longrunning = { + createBatch: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + createBatchResponse.decode.bind(createBatchResponse), + createBatchMetadata.decode.bind(createBatchMetadata)) + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.dataproc.v1.BatchController', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. 
+ * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.batchControllerStub) { + return this.batchControllerStub; + } + + // Put together the "service stub" for + // google.cloud.dataproc.v1.BatchController. + this.batchControllerStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.BatchController') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.dataproc.v1.BatchController, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+ const batchControllerStubMethods = + ['createBatch', 'getBatch', 'listBatches', 'deleteBatch']; + for (const methodName of batchControllerStubMethods) { + const callPromise = this.batchControllerStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + this.descriptors.longrunning[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.batchControllerStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'dataproc.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'dataproc.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. 
+ */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- +/** + * Gets the batch workload resource representation. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The name of the batch to retrieve. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Batch]{@link google.cloud.dataproc.v1.Batch}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. + * @example include:samples/generated/v1/batch_controller.get_batch.js + * region_tag:dataproc_v1_generated_BatchController_GetBatch_async + */ + getBatch( + request?: protos.google.cloud.dataproc.v1.IGetBatchRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IBatch, + protos.google.cloud.dataproc.v1.IGetBatchRequest|undefined, {}|undefined + ]>; + getBatch( + request: protos.google.cloud.dataproc.v1.IGetBatchRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IBatch, + protos.google.cloud.dataproc.v1.IGetBatchRequest|null|undefined, + {}|null|undefined>): void; + getBatch( + request: protos.google.cloud.dataproc.v1.IGetBatchRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IBatch, + protos.google.cloud.dataproc.v1.IGetBatchRequest|null|undefined, + {}|null|undefined>): void; + getBatch( + request?: protos.google.cloud.dataproc.v1.IGetBatchRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IBatch, + 
protos.google.cloud.dataproc.v1.IGetBatchRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IBatch, + protos.google.cloud.dataproc.v1.IGetBatchRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IBatch, + protos.google.cloud.dataproc.v1.IGetBatchRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'name': request.name ?? '', + }); + this.initialize(); + return this.innerApiCalls.getBatch(request, options, callback); + } +/** + * Deletes the batch workload resource. If the batch is not in terminal state, + * the delete fails and the response returns `FAILED_PRECONDITION`. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The name of the batch resource to delete. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/batch_controller.delete_batch.js + * region_tag:dataproc_v1_generated_BatchController_DeleteBatch_async + */ + deleteBatch( + request?: protos.google.cloud.dataproc.v1.IDeleteBatchRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteBatchRequest|undefined, {}|undefined + ]>; + deleteBatch( + request: protos.google.cloud.dataproc.v1.IDeleteBatchRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteBatchRequest|null|undefined, + {}|null|undefined>): void; + deleteBatch( + request: protos.google.cloud.dataproc.v1.IDeleteBatchRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteBatchRequest|null|undefined, + {}|null|undefined>): void; + deleteBatch( + request?: protos.google.cloud.dataproc.v1.IDeleteBatchRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteBatchRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteBatchRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteBatchRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'name': request.name ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.deleteBatch(request, options, callback); + } + +/** + * Creates a batch workload that executes asynchronously. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The parent resource where this batch will be created. + * @param {google.cloud.dataproc.v1.Batch} request.batch + * Required. The batch to create. + * @param {string} [request.batchId] + * Optional. The ID to use for the batch, which will become the final component of + * the batch's resource name. + * + * This value must be 4-63 characters. Valid characters are `/{@link 0-9|a-z}-/`. + * @param {string} [request.requestId] + * Optional. A unique ID used to identify the request. If the service + * receives two + * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s + * with the same request_id, the second request is ignored and the + * Operation that corresponds to the first Batch created and stored + * in the backend is returned. + * + * Recommendation: Set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The value must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/batch_controller.create_batch.js + * region_tag:dataproc_v1_generated_BatchController_CreateBatch_async + */ + createBatch( + request?: protos.google.cloud.dataproc.v1.ICreateBatchRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + createBatch( + request: protos.google.cloud.dataproc.v1.ICreateBatchRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + createBatch( + request: protos.google.cloud.dataproc.v1.ICreateBatchRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + createBatch( + request?: protos.google.cloud.dataproc.v1.ICreateBatchRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + this.initialize(); + return this.innerApiCalls.createBatch(request, options, callback); + } +/** + * Check the status of the long running operation returned by `createBatch()`. + * @param {String} name + * The operation name that will be passed. 
+ * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/batch_controller.create_batch.js + * region_tag:dataproc_v1_generated_BatchController_CreateBatch_async + */ + async checkCreateBatchProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.createBatch, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } + /** + * Lists batch workloads. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The parent, which owns this collection of batches. + * @param {number} [request.pageSize] + * Optional. The maximum number of batches to return in each response. + * The service may return fewer than this value. + * The default page size is 20; the maximum page size is 1000. + * @param {string} [request.pageToken] + * Optional. A page token received from a previous `ListBatches` call. + * Provide this token to retrieve the subsequent page. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of [Batch]{@link google.cloud.dataproc.v1.Batch}. 
+ * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. + * We recommend using `listBatchesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. + */ + listBatches( + request?: protos.google.cloud.dataproc.v1.IListBatchesRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IBatch[], + protos.google.cloud.dataproc.v1.IListBatchesRequest|null, + protos.google.cloud.dataproc.v1.IListBatchesResponse + ]>; + listBatches( + request: protos.google.cloud.dataproc.v1.IListBatchesRequest, + options: CallOptions, + callback: PaginationCallback< + protos.google.cloud.dataproc.v1.IListBatchesRequest, + protos.google.cloud.dataproc.v1.IListBatchesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IBatch>): void; + listBatches( + request: protos.google.cloud.dataproc.v1.IListBatchesRequest, + callback: PaginationCallback< + protos.google.cloud.dataproc.v1.IListBatchesRequest, + protos.google.cloud.dataproc.v1.IListBatchesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IBatch>): void; + listBatches( + request?: protos.google.cloud.dataproc.v1.IListBatchesRequest, + optionsOrCallback?: CallOptions|PaginationCallback< + protos.google.cloud.dataproc.v1.IListBatchesRequest, + protos.google.cloud.dataproc.v1.IListBatchesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IBatch>, + callback?: PaginationCallback< + protos.google.cloud.dataproc.v1.IListBatchesRequest, + protos.google.cloud.dataproc.v1.IListBatchesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IBatch>): + Promise<[ + protos.google.cloud.dataproc.v1.IBatch[], + 
protos.google.cloud.dataproc.v1.IListBatchesRequest|null, + protos.google.cloud.dataproc.v1.IListBatchesResponse + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + this.initialize(); + return this.innerApiCalls.listBatches(request, options, callback); + } + +/** + * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The parent, which owns this collection of batches. + * @param {number} [request.pageSize] + * Optional. The maximum number of batches to return in each response. + * The service may return fewer than this value. + * The default page size is 20; the maximum page size is 1000. + * @param {string} [request.pageToken] + * Optional. A page token received from a previous `ListBatches` call. + * Provide this token to retrieve the subsequent page. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits an object representing [Batch]{@link google.cloud.dataproc.v1.Batch} on 'data' event. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed. Note that it can affect your quota. + * We recommend using `listBatchesAsync()` + * method described below for async iteration which you can stop as needed. 
+ * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. + */ + listBatchesStream( + request?: protos.google.cloud.dataproc.v1.IListBatchesRequest, + options?: CallOptions): + Transform{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + const defaultCallSettings = this._defaults['listBatches']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listBatches.createStream( + this.innerApiCalls.listBatches as GaxCall, + request, + callSettings + ); + } + +/** + * Equivalent to `listBatches`, but returns an iterable object. + * + * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The parent, which owns this collection of batches. + * @param {number} [request.pageSize] + * Optional. The maximum number of batches to return in each response. + * The service may return fewer than this value. + * The default page size is 20; the maximum page size is 1000. + * @param {string} [request.pageToken] + * Optional. A page token received from a previous `ListBatches` call. + * Provide this token to retrieve the subsequent page. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Object} + * An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols). 
+ * When you iterate the returned iterable, each element will be an object representing + * [Batch]{@link google.cloud.dataproc.v1.Batch}. The API will be called under the hood as needed, once per the page, + * so you can stop the iteration when you don't need more results. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. + * @example include:samples/generated/v1/batch_controller.list_batches.js + * region_tag:dataproc_v1_generated_BatchController_ListBatches_async + */ + listBatchesAsync( + request?: protos.google.cloud.dataproc.v1.IListBatchesRequest, + options?: CallOptions): + AsyncIterable{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + const defaultCallSettings = this._defaults['listBatches']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listBatches.asyncIterate( + this.innerApiCalls['listBatches'] as GaxCall, + request as {}, + callSettings + ) as AsyncIterable; + } + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified batch resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} batch + * @returns {string} Resource name string. + */ + batchPath(project:string,location:string,batch:string) { + return this.pathTemplates.batchPathTemplate.render({ + project: project, + location: location, + batch: batch, + }); + } + + /** + * Parse the project from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the project. 
+ */ + matchProjectFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).project; + } + + /** + * Parse the location from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the location. + */ + matchLocationFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).location; + } + + /** + * Parse the batch from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the batch. + */ + matchBatchFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).batch; + } + + /** + * Return a fully-qualified location resource name string. + * + * @param {string} project + * @param {string} location + * @returns {string} Resource name string. + */ + locationPath(project:string,location:string) { + return this.pathTemplates.locationPathTemplate.render({ + project: project, + location: location, + }); + } + + /** + * Parse the project from Location resource. + * + * @param {string} locationName + * A fully-qualified path representing Location resource. + * @returns {string} A string representing the project. + */ + matchProjectFromLocationName(locationName: string) { + return this.pathTemplates.locationPathTemplate.match(locationName).project; + } + + /** + * Parse the location from Location resource. + * + * @param {string} locationName + * A fully-qualified path representing Location resource. + * @returns {string} A string representing the location. + */ + matchLocationFromLocationName(locationName: string) { + return this.pathTemplates.locationPathTemplate.match(locationName).location; + } + + /** + * Return a fully-qualified nodeGroup resource name string. 
+ * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. + */ + nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. + */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; + } + + /** + * Parse the region from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the cluster. + */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; + } + + /** + * Return a fully-qualified project resource name string. + * + * @param {string} project + * @returns {string} Resource name string. 
+ */ + projectPath(project:string) { + return this.pathTemplates.projectPathTemplate.render({ + project: project, + }); + } + + /** + * Parse the project from Project resource. + * + * @param {string} projectName + * A fully-qualified path representing Project resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectName(projectName: string) { + return this.pathTemplates.projectPathTemplate.match(projectName).project; + } + + /** + * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ + project: project, + location: location, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; + } + + /** + * Parse the location from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the location. 
+ */ + matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; + } + + /** + * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectLocationWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ + project: project, + location: location, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; + } + + /** + * Parse the location from ProjectLocationWorkflowTemplate resource. 
+ * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the location. + */ + matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; + } + + /** + * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; + } + + /** + * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ + project: project, + region: region, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the project. 
+ */ + matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; + } + + /** + * Parse the region from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; + } + + /** + * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectRegionWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ + project: project, + region: region, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. 
+ * @returns {string} A string representing the project. + */ + matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; + } + + /** + * Parse the region from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; + } + + /** + * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+ */ + close(): Promise { + if (this.batchControllerStub && !this._terminated) { + return this.batchControllerStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client_config.json new file mode 100644 index 00000000000..a451087cbb2 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client_config.json @@ -0,0 +1,42 @@ +{ + "interfaces": { + "google.cloud.dataproc.v1.BatchController": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "CreateBatch": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "GetBatch": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "ListBatches": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "DeleteBatch": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_proto_list.json new file mode 100644 index 00000000000..3bb7ccf055a --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_proto_list.json @@ -0,0 +1,10 @@ +[ + "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", + "../../protos/google/cloud/dataproc/v1/batches.proto", + 
"../../protos/google/cloud/dataproc/v1/clusters.proto", + "../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", + "../../protos/google/cloud/dataproc/v1/operations.proto", + "../../protos/google/cloud/dataproc/v1/shared.proto", + "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" +] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client.ts new file mode 100644 index 00000000000..15d1aa57e3e --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client.ts @@ -0,0 +1,1840 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1/cluster_controller_client_config.json`. 
+ * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './cluster_controller_client_config.json'; +const version = require('../../../package.json').version; + +/** + * The ClusterControllerService provides methods to manage clusters + * of Compute Engine instances. + * @class + * @memberof v1 + */ +export class ClusterControllerClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + pathTemplates: {[name: string]: gax.PathTemplate}; + operationsClient: gax.OperationsClient; + clusterControllerStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of ClusterControllerClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. 
+ * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new ClusterControllerClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. 
+ const staticMembers = this.constructor as typeof ClusterControllerClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // Request numeric enum values if REST transport is used. + opts.numericEnums = true; + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. 
+ const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. + this.pathTemplates = { + batchPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/batches/{batch}' + ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), + projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' + ), + projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' + ), + projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' + ), + projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' + ), + servicePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/services/{service}' + ), + }; + + // Some of the methods on this service return "paged" results, + 
// (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listClusters: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'clusters') + }; + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [{selector: 'google.iam.v1.IAMPolicy.GetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy',body: '*',}], + },{selector: 'google.iam.v1.IAMPolicy.SetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy',body: 
'*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy',body: '*',}], + },{selector: 'google.iam.v1.IAMPolicy.TestIamPermissions',post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions',body: '*',}], + },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/regions/*/operations/*}:cancel',},{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/regions/*/operations}',}]; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const createClusterResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.Cluster') as gax.protobuf.Type; + const createClusterMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; + const updateClusterResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.Cluster') as gax.protobuf.Type; + const updateClusterMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; + const stopClusterResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.Cluster') as gax.protobuf.Type; + 
const stopClusterMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; + const startClusterResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.Cluster') as gax.protobuf.Type; + const startClusterMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; + const deleteClusterResponse = protoFilesRoot.lookup( + '.google.protobuf.Empty') as gax.protobuf.Type; + const deleteClusterMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; + const diagnoseClusterResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.DiagnoseClusterResults') as gax.protobuf.Type; + const diagnoseClusterMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; + + this.descriptors.longrunning = { + createCluster: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + createClusterResponse.decode.bind(createClusterResponse), + createClusterMetadata.decode.bind(createClusterMetadata)), + updateCluster: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + updateClusterResponse.decode.bind(updateClusterResponse), + updateClusterMetadata.decode.bind(updateClusterMetadata)), + stopCluster: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + stopClusterResponse.decode.bind(stopClusterResponse), + stopClusterMetadata.decode.bind(stopClusterMetadata)), + startCluster: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + startClusterResponse.decode.bind(startClusterResponse), + startClusterMetadata.decode.bind(startClusterMetadata)), + deleteCluster: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + deleteClusterResponse.decode.bind(deleteClusterResponse), + deleteClusterMetadata.decode.bind(deleteClusterMetadata)), + diagnoseCluster: new this._gaxModule.LongrunningDescriptor( + 
this.operationsClient, + diagnoseClusterResponse.decode.bind(diagnoseClusterResponse), + diagnoseClusterMetadata.decode.bind(diagnoseClusterMetadata)) + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.dataproc.v1.ClusterController', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.clusterControllerStub) { + return this.clusterControllerStub; + } + + // Put together the "service stub" for + // google.cloud.dataproc.v1.ClusterController. + this.clusterControllerStub = this._gaxGrpc.createStub( + this._opts.fallback ? 
+ (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.ClusterController') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.dataproc.v1.ClusterController, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const clusterControllerStubMethods = + ['createCluster', 'updateCluster', 'stopCluster', 'startCluster', 'deleteCluster', 'getCluster', 'listClusters', 'diagnoseCluster']; + for (const methodName of clusterControllerStubMethods) { + const callPromise = this.clusterControllerStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + this.descriptors.longrunning[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.clusterControllerStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'dataproc.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'dataproc.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. 
+ * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. + */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- +/** + * Gets the resource representation for a cluster in a project. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} request.clusterName + * Required. The cluster name. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Cluster]{@link google.cloud.dataproc.v1.Cluster}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/cluster_controller.get_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_GetCluster_async + */ + getCluster( + request?: protos.google.cloud.dataproc.v1.IGetClusterRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.ICluster, + protos.google.cloud.dataproc.v1.IGetClusterRequest|undefined, {}|undefined + ]>; + getCluster( + request: protos.google.cloud.dataproc.v1.IGetClusterRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.ICluster, + protos.google.cloud.dataproc.v1.IGetClusterRequest|null|undefined, + {}|null|undefined>): void; + getCluster( + request: protos.google.cloud.dataproc.v1.IGetClusterRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.ICluster, + protos.google.cloud.dataproc.v1.IGetClusterRequest|null|undefined, + {}|null|undefined>): void; + getCluster( + request?: protos.google.cloud.dataproc.v1.IGetClusterRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.ICluster, + protos.google.cloud.dataproc.v1.IGetClusterRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.ICluster, + protos.google.cloud.dataproc.v1.IGetClusterRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.ICluster, + protos.google.cloud.dataproc.v1.IGetClusterRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? 
'', + 'region': request.region ?? '', + 'cluster_name': request.clusterName ?? '', + }); + this.initialize(); + return this.innerApiCalls.getCluster(request, options, callback); + } + +/** + * Creates a cluster in a project. The returned + * {@link google.longrunning.Operation.metadata|Operation.metadata} will be + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {google.cloud.dataproc.v1.Cluster} request.cluster + * Required. The cluster to create. + * @param {string} [request.requestId] + * Optional. A unique ID used to identify the request. If the server receives + * two + * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s + * with the same id, then the second request will be ignored and the + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. + * + * It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {google.cloud.dataproc.v1.FailureAction} [request.actionOnFailedPrimaryWorkers] + * Optional. Failure action when primary worker creation fails. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. 
+ * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/cluster_controller.create_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_CreateCluster_async + */ + createCluster( + request?: protos.google.cloud.dataproc.v1.ICreateClusterRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + createCluster( + request: protos.google.cloud.dataproc.v1.ICreateClusterRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + createCluster( + request: protos.google.cloud.dataproc.v1.ICreateClusterRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + createCluster( + request?: protos.google.cloud.dataproc.v1.ICreateClusterRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + 
] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + }); + this.initialize(); + return this.innerApiCalls.createCluster(request, options, callback); + } +/** + * Check the status of the long running operation returned by `createCluster()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/cluster_controller.create_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_CreateCluster_async + */ + async checkCreateClusterProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.createCluster, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } +/** + * Updates a cluster in a project. The returned + * {@link google.longrunning.Operation.metadata|Operation.metadata} will be + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * The cluster must be in a + * {@link google.cloud.dataproc.v1.ClusterStatus.State|`RUNNING`} state or an error + * is returned. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project the + * cluster belongs to. + * @param {string} request.region + * Required. 
The Dataproc region in which to handle the request. + * @param {string} request.clusterName + * Required. The cluster name. + * @param {google.cloud.dataproc.v1.Cluster} request.cluster + * Required. The changes to the cluster. + * @param {google.protobuf.Duration} [request.gracefulDecommissionTimeout] + * Optional. Timeout for graceful YARN decomissioning. Graceful + * decommissioning allows removing nodes from the cluster without + * interrupting jobs in progress. Timeout specifies how long to wait for jobs + * in progress to finish before forcefully removing nodes (and potentially + * interrupting jobs). Default timeout is 0 (for forceful decommission), and + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + * + * Only supported on Dataproc image versions 1.2 and higher. + * @param {google.protobuf.FieldMask} request.updateMask + * Required. Specifies the path, relative to `Cluster`, of + * the field to update. For example, to change the number of workers + * in a cluster to 5, the `update_mask` parameter would be + * specified as `config.worker_config.num_instances`, + * and the `PATCH` request body would specify the new value, as follows: + * + * { + * "config":{ + * "workerConfig":{ + * "numInstances":"5" + * } + * } + * } + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: + * + * { + * "config":{ + * "secondaryWorkerConfig":{ + * "numInstances":"5" + * } + * } + * } + * Note: Currently, only the following fields can be updated: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
MaskPurpose
labelsUpdate labels
config.worker_config.num_instancesResize primary worker group
config.secondary_worker_config.num_instancesResize secondary worker group
config.autoscaling_config.policy_uriUse, stop using, or + * change autoscaling policies
+ * @param {string} [request.requestId] + * Optional. A unique ID used to identify the request. If the server + * receives two + * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s + * with the same id, then the second request will be ignored and the + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. + * + * It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/cluster_controller.update_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_UpdateCluster_async + */ + updateCluster( + request?: protos.google.cloud.dataproc.v1.IUpdateClusterRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + updateCluster( + request: protos.google.cloud.dataproc.v1.IUpdateClusterRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + updateCluster( + request: protos.google.cloud.dataproc.v1.IUpdateClusterRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + updateCluster( + request?: protos.google.cloud.dataproc.v1.IUpdateClusterRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + 'cluster_name': request.clusterName ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.updateCluster(request, options, callback); + } +/** + * Check the status of the long running operation returned by `updateCluster()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/cluster_controller.update_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_UpdateCluster_async + */ + async checkUpdateClusterProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.updateCluster, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } +/** + * Stops a cluster in a project. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project the + * cluster belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} request.clusterName + * Required. The cluster name. + * @param {string} [request.clusterUuid] + * Optional. Specifying the `cluster_uuid` means the RPC will fail + * (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + * @param {string} [request.requestId] + * Optional. A unique ID used to identify the request. 
If the server + * receives two + * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s + * with the same id, then the second request will be ignored and the + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. + * + * Recommendation: Set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/cluster_controller.stop_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_StopCluster_async + */ + stopCluster( + request?: protos.google.cloud.dataproc.v1.IStopClusterRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + stopCluster( + request: protos.google.cloud.dataproc.v1.IStopClusterRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + stopCluster( + request: protos.google.cloud.dataproc.v1.IStopClusterRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + stopCluster( + request?: protos.google.cloud.dataproc.v1.IStopClusterRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + 'cluster_name': request.clusterName ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.stopCluster(request, options, callback); + } +/** + * Check the status of the long running operation returned by `stopCluster()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/cluster_controller.stop_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_StopCluster_async + */ + async checkStopClusterProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.stopCluster, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } +/** + * Starts a cluster in a project. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project the + * cluster belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} request.clusterName + * Required. The cluster name. + * @param {string} [request.clusterUuid] + * Optional. Specifying the `cluster_uuid` means the RPC will fail + * (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + * @param {string} [request.requestId] + * Optional. A unique ID used to identify the request. 
If the server + * receives two + * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s + * with the same id, then the second request will be ignored and the + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. + * + * Recommendation: Set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/cluster_controller.start_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_StartCluster_async + */ + startCluster( + request?: protos.google.cloud.dataproc.v1.IStartClusterRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + startCluster( + request: protos.google.cloud.dataproc.v1.IStartClusterRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + startCluster( + request: protos.google.cloud.dataproc.v1.IStartClusterRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + startCluster( + request?: protos.google.cloud.dataproc.v1.IStartClusterRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + 'cluster_name': request.clusterName ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.startCluster(request, options, callback); + } +/** + * Check the status of the long running operation returned by `startCluster()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/cluster_controller.start_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_StartCluster_async + */ + async checkStartClusterProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.startCluster, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } +/** + * Deletes a cluster in a project. The returned + * {@link google.longrunning.Operation.metadata|Operation.metadata} will be + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} request.clusterName + * Required. The cluster name. + * @param {string} [request.clusterUuid] + * Optional. 
Specifying the `cluster_uuid` means the RPC should fail + * (with error NOT_FOUND) if cluster with specified UUID does not exist. + * @param {string} [request.requestId] + * Optional. A unique ID used to identify the request. If the server + * receives two + * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s + * with the same id, then the second request will be ignored and the + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. + * + * It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/cluster_controller.delete_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_DeleteCluster_async + */ + deleteCluster( + request?: protos.google.cloud.dataproc.v1.IDeleteClusterRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + deleteCluster( + request: protos.google.cloud.dataproc.v1.IDeleteClusterRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + deleteCluster( + request: protos.google.cloud.dataproc.v1.IDeleteClusterRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + deleteCluster( + request?: protos.google.cloud.dataproc.v1.IDeleteClusterRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + 'cluster_name': request.clusterName ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.deleteCluster(request, options, callback); + } +/** + * Check the status of the long running operation returned by `deleteCluster()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/cluster_controller.delete_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_DeleteCluster_async + */ + async checkDeleteClusterProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.deleteCluster, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } +/** + * Gets cluster diagnostic information. The returned + * {@link google.longrunning.Operation.metadata|Operation.metadata} will be + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * After the operation completes, + * {@link google.longrunning.Operation.response|Operation.response} + * contains + * [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + * @param {string} request.region + * Required. 
The Dataproc region in which to handle the request. + * @param {string} request.clusterName + * Required. The cluster name. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/cluster_controller.diagnose_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_DiagnoseCluster_async + */ + diagnoseCluster( + request?: protos.google.cloud.dataproc.v1.IDiagnoseClusterRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + diagnoseCluster( + request: protos.google.cloud.dataproc.v1.IDiagnoseClusterRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + diagnoseCluster( + request: protos.google.cloud.dataproc.v1.IDiagnoseClusterRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + diagnoseCluster( + request?: protos.google.cloud.dataproc.v1.IDiagnoseClusterRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if 
(typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + 'cluster_name': request.clusterName ?? '', + }); + this.initialize(); + return this.innerApiCalls.diagnoseCluster(request, options, callback); + } +/** + * Check the status of the long running operation returned by `diagnoseCluster()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/cluster_controller.diagnose_cluster.js + * region_tag:dataproc_v1_generated_ClusterController_DiagnoseCluster_async + */ + async checkDiagnoseClusterProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.diagnoseCluster, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } + /** + * Lists all regions/{region}/clusters in a project alphabetically. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. 
The ID of the Google Cloud Platform project that the cluster + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} [request.filter] + * Optional. A filter constraining the clusters to list. Filters are + * case-sensitive and have the following syntax: + * + * field = value [AND [field = value]] ... + * + * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, + * and `[KEY]` is a label key. **value** can be `*` to match all values. + * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, + * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` + * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` + * contains the `DELETING` and `ERROR` states. + * `clusterName` is the name of the cluster provided at creation time. + * Only the logical `AND` operator is supported; space-separated items are + * treated as having an implicit `AND` operator. + * + * Example filter: + * + * status.state = ACTIVE AND clusterName = mycluster + * AND labels.env = staging AND labels.starred = * + * @param {number} [request.pageSize] + * Optional. The standard List page size. + * @param {string} [request.pageToken] + * Optional. The standard List page token. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of [Cluster]{@link google.cloud.dataproc.v1.Cluster}. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. + * We recommend using `listClustersAsync()` + * method described below for async iteration which you can stop as needed. 
+ * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. + */ + listClusters( + request?: protos.google.cloud.dataproc.v1.IListClustersRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.ICluster[], + protos.google.cloud.dataproc.v1.IListClustersRequest|null, + protos.google.cloud.dataproc.v1.IListClustersResponse + ]>; + listClusters( + request: protos.google.cloud.dataproc.v1.IListClustersRequest, + options: CallOptions, + callback: PaginationCallback< + protos.google.cloud.dataproc.v1.IListClustersRequest, + protos.google.cloud.dataproc.v1.IListClustersResponse|null|undefined, + protos.google.cloud.dataproc.v1.ICluster>): void; + listClusters( + request: protos.google.cloud.dataproc.v1.IListClustersRequest, + callback: PaginationCallback< + protos.google.cloud.dataproc.v1.IListClustersRequest, + protos.google.cloud.dataproc.v1.IListClustersResponse|null|undefined, + protos.google.cloud.dataproc.v1.ICluster>): void; + listClusters( + request?: protos.google.cloud.dataproc.v1.IListClustersRequest, + optionsOrCallback?: CallOptions|PaginationCallback< + protos.google.cloud.dataproc.v1.IListClustersRequest, + protos.google.cloud.dataproc.v1.IListClustersResponse|null|undefined, + protos.google.cloud.dataproc.v1.ICluster>, + callback?: PaginationCallback< + protos.google.cloud.dataproc.v1.IListClustersRequest, + protos.google.cloud.dataproc.v1.IListClustersResponse|null|undefined, + protos.google.cloud.dataproc.v1.ICluster>): + Promise<[ + protos.google.cloud.dataproc.v1.ICluster[], + protos.google.cloud.dataproc.v1.IListClustersRequest|null, + protos.google.cloud.dataproc.v1.IListClustersResponse + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as 
CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + }); + this.initialize(); + return this.innerApiCalls.listClusters(request, options, callback); + } + +/** + * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} [request.filter] + * Optional. A filter constraining the clusters to list. Filters are + * case-sensitive and have the following syntax: + * + * field = value [AND [field = value]] ... + * + * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, + * and `[KEY]` is a label key. **value** can be `*` to match all values. + * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, + * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` + * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` + * contains the `DELETING` and `ERROR` states. + * `clusterName` is the name of the cluster provided at creation time. + * Only the logical `AND` operator is supported; space-separated items are + * treated as having an implicit `AND` operator. + * + * Example filter: + * + * status.state = ACTIVE AND clusterName = mycluster + * AND labels.env = staging AND labels.starred = * + * @param {number} [request.pageSize] + * Optional. The standard List page size. + * @param {string} [request.pageToken] + * Optional. The standard List page token. + * @param {object} [options] + * Call options. 
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits an object representing [Cluster]{@link google.cloud.dataproc.v1.Cluster} on 'data' event. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed. Note that it can affect your quota. + * We recommend using `listClustersAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. + */ + listClustersStream( + request?: protos.google.cloud.dataproc.v1.IListClustersRequest, + options?: CallOptions): + Transform{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + }); + const defaultCallSettings = this._defaults['listClusters']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listClusters.createStream( + this.innerApiCalls.listClusters as GaxCall, + request, + callSettings + ); + } + +/** + * Equivalent to `listClusters`, but returns an iterable object. + * + * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the cluster + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} [request.filter] + * Optional. 
A filter constraining the clusters to list. Filters are + * case-sensitive and have the following syntax: + * + * field = value [AND [field = value]] ... + * + * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, + * and `[KEY]` is a label key. **value** can be `*` to match all values. + * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, + * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` + * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` + * contains the `DELETING` and `ERROR` states. + * `clusterName` is the name of the cluster provided at creation time. + * Only the logical `AND` operator is supported; space-separated items are + * treated as having an implicit `AND` operator. + * + * Example filter: + * + * status.state = ACTIVE AND clusterName = mycluster + * AND labels.env = staging AND labels.starred = * + * @param {number} [request.pageSize] + * Optional. The standard List page size. + * @param {string} [request.pageToken] + * Optional. The standard List page token. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Object} + * An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols). + * When you iterate the returned iterable, each element will be an object representing + * [Cluster]{@link google.cloud.dataproc.v1.Cluster}. The API will be called under the hood as needed, once per the page, + * so you can stop the iteration when you don't need more results. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. 
+ * @example include:samples/generated/v1/cluster_controller.list_clusters.js + * region_tag:dataproc_v1_generated_ClusterController_ListClusters_async + */ + listClustersAsync( + request?: protos.google.cloud.dataproc.v1.IListClustersRequest, + options?: CallOptions): + AsyncIterable{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + }); + const defaultCallSettings = this._defaults['listClusters']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listClusters.asyncIterate( + this.innerApiCalls['listClusters'] as GaxCall, + request as {}, + callSettings + ) as AsyncIterable; + } + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified batch resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} batch + * @returns {string} Resource name string. + */ + batchPath(project:string,location:string,batch:string) { + return this.pathTemplates.batchPathTemplate.render({ + project: project, + location: location, + batch: batch, + }); + } + + /** + * Parse the project from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the project. + */ + matchProjectFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).project; + } + + /** + * Parse the location from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the location. 
+ */ + matchLocationFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).location; + } + + /** + * Parse the batch from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the batch. + */ + matchBatchFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).batch; + } + + /** + * Return a fully-qualified nodeGroup resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. + */ + nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. + */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; + } + + /** + * Parse the region from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the cluster. 
+ */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; + } + + /** + * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ + project: project, + location: location, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; + } + + /** + * Parse the location from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the location. 
+ */ + matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; + } + + /** + * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectLocationWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ + project: project, + location: location, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; + } + + /** + * Parse the location from ProjectLocationWorkflowTemplate resource. 
+ * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the location. + */ + matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; + } + + /** + * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; + } + + /** + * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ + project: project, + region: region, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the project. 
+ */ + matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; + } + + /** + * Parse the region from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; + } + + /** + * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectRegionWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ + project: project, + region: region, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. 
+ * @returns {string} A string representing the project. + */ + matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; + } + + /** + * Parse the region from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; + } + + /** + * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; + } + + /** + * Return a fully-qualified service resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} service + * @returns {string} Resource name string. + */ + servicePath(project:string,location:string,service:string) { + return this.pathTemplates.servicePathTemplate.render({ + project: project, + location: location, + service: service, + }); + } + + /** + * Parse the project from Service resource. + * + * @param {string} serviceName + * A fully-qualified path representing Service resource. + * @returns {string} A string representing the project. 
+ */ + matchProjectFromServiceName(serviceName: string) { + return this.pathTemplates.servicePathTemplate.match(serviceName).project; + } + + /** + * Parse the location from Service resource. + * + * @param {string} serviceName + * A fully-qualified path representing Service resource. + * @returns {string} A string representing the location. + */ + matchLocationFromServiceName(serviceName: string) { + return this.pathTemplates.servicePathTemplate.match(serviceName).location; + } + + /** + * Parse the service from Service resource. + * + * @param {string} serviceName + * A fully-qualified path representing Service resource. + * @returns {string} A string representing the service. + */ + matchServiceFromServiceName(serviceName: string) { + return this.pathTemplates.servicePathTemplate.match(serviceName).service; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+ */ + close(): Promise { + if (this.clusterControllerStub && !this._terminated) { + return this.clusterControllerStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client_config.json new file mode 100644 index 00000000000..6f5f5f3dd1d --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client_config.json @@ -0,0 +1,72 @@ +{ + "interfaces": { + "google.cloud.dataproc.v1.ClusterController": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ], + "unavailable": [ + "UNAVAILABLE" + ], + "deadline_exceeded_internal_unavailable": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "CreateCluster": { + "timeout_millis": 300000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "UpdateCluster": { + "timeout_millis": 300000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "StopCluster": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "StartCluster": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "DeleteCluster": { + "timeout_millis": 300000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "GetCluster": { + "timeout_millis": 300000, + "retry_codes_name": "deadline_exceeded_internal_unavailable", + "retry_params_name": "default" + }, + "ListClusters": { + "timeout_millis": 
300000, + "retry_codes_name": "deadline_exceeded_internal_unavailable", + "retry_params_name": "default" + }, + "DiagnoseCluster": { + "timeout_millis": 300000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_proto_list.json new file mode 100644 index 00000000000..3bb7ccf055a --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_proto_list.json @@ -0,0 +1,10 @@ +[ + "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", + "../../protos/google/cloud/dataproc/v1/batches.proto", + "../../protos/google/cloud/dataproc/v1/clusters.proto", + "../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", + "../../protos/google/cloud/dataproc/v1/operations.proto", + "../../protos/google/cloud/dataproc/v1/shared.proto", + "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" +] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/gapic_metadata.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/gapic_metadata.json new file mode 100644 index 00000000000..36b4ef4bf5d --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/gapic_metadata.json @@ -0,0 +1,453 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "typescript", + "protoPackage": "google.cloud.dataproc.v1", + "libraryPackage": "@google-cloud/dataproc", + "services": { + "AutoscalingPolicyService": { + "clients": { + "grpc": { + "libraryClient": "AutoscalingPolicyServiceClient", + "rpcs": { + "CreateAutoscalingPolicy": { + "methods": [ + "createAutoscalingPolicy" + ] + }, + "UpdateAutoscalingPolicy": { + "methods": [ + "updateAutoscalingPolicy" + ] + }, + "GetAutoscalingPolicy": { + "methods": [ + 
"getAutoscalingPolicy" + ] + }, + "DeleteAutoscalingPolicy": { + "methods": [ + "deleteAutoscalingPolicy" + ] + }, + "ListAutoscalingPolicies": { + "methods": [ + "listAutoscalingPolicies", + "listAutoscalingPoliciesStream", + "listAutoscalingPoliciesAsync" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "AutoscalingPolicyServiceClient", + "rpcs": { + "CreateAutoscalingPolicy": { + "methods": [ + "createAutoscalingPolicy" + ] + }, + "UpdateAutoscalingPolicy": { + "methods": [ + "updateAutoscalingPolicy" + ] + }, + "GetAutoscalingPolicy": { + "methods": [ + "getAutoscalingPolicy" + ] + }, + "DeleteAutoscalingPolicy": { + "methods": [ + "deleteAutoscalingPolicy" + ] + }, + "ListAutoscalingPolicies": { + "methods": [ + "listAutoscalingPolicies", + "listAutoscalingPoliciesStream", + "listAutoscalingPoliciesAsync" + ] + } + } + } + } + }, + "BatchController": { + "clients": { + "grpc": { + "libraryClient": "BatchControllerClient", + "rpcs": { + "GetBatch": { + "methods": [ + "getBatch" + ] + }, + "DeleteBatch": { + "methods": [ + "deleteBatch" + ] + }, + "CreateBatch": { + "methods": [ + "createBatch" + ] + }, + "ListBatches": { + "methods": [ + "listBatches", + "listBatchesStream", + "listBatchesAsync" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "BatchControllerClient", + "rpcs": { + "GetBatch": { + "methods": [ + "getBatch" + ] + }, + "DeleteBatch": { + "methods": [ + "deleteBatch" + ] + }, + "CreateBatch": { + "methods": [ + "createBatch" + ] + }, + "ListBatches": { + "methods": [ + "listBatches", + "listBatchesStream", + "listBatchesAsync" + ] + } + } + } + } + }, + "ClusterController": { + "clients": { + "grpc": { + "libraryClient": "ClusterControllerClient", + "rpcs": { + "GetCluster": { + "methods": [ + "getCluster" + ] + }, + "CreateCluster": { + "methods": [ + "createCluster" + ] + }, + "UpdateCluster": { + "methods": [ + "updateCluster" + ] + }, + "StopCluster": { + "methods": [ + "stopCluster" + ] + }, + "StartCluster": { + "methods": [ 
+ "startCluster" + ] + }, + "DeleteCluster": { + "methods": [ + "deleteCluster" + ] + }, + "DiagnoseCluster": { + "methods": [ + "diagnoseCluster" + ] + }, + "ListClusters": { + "methods": [ + "listClusters", + "listClustersStream", + "listClustersAsync" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "ClusterControllerClient", + "rpcs": { + "GetCluster": { + "methods": [ + "getCluster" + ] + }, + "CreateCluster": { + "methods": [ + "createCluster" + ] + }, + "UpdateCluster": { + "methods": [ + "updateCluster" + ] + }, + "StopCluster": { + "methods": [ + "stopCluster" + ] + }, + "StartCluster": { + "methods": [ + "startCluster" + ] + }, + "DeleteCluster": { + "methods": [ + "deleteCluster" + ] + }, + "DiagnoseCluster": { + "methods": [ + "diagnoseCluster" + ] + }, + "ListClusters": { + "methods": [ + "listClusters", + "listClustersStream", + "listClustersAsync" + ] + } + } + } + } + }, + "JobController": { + "clients": { + "grpc": { + "libraryClient": "JobControllerClient", + "rpcs": { + "SubmitJob": { + "methods": [ + "submitJob" + ] + }, + "GetJob": { + "methods": [ + "getJob" + ] + }, + "UpdateJob": { + "methods": [ + "updateJob" + ] + }, + "CancelJob": { + "methods": [ + "cancelJob" + ] + }, + "DeleteJob": { + "methods": [ + "deleteJob" + ] + }, + "SubmitJobAsOperation": { + "methods": [ + "submitJobAsOperation" + ] + }, + "ListJobs": { + "methods": [ + "listJobs", + "listJobsStream", + "listJobsAsync" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "JobControllerClient", + "rpcs": { + "SubmitJob": { + "methods": [ + "submitJob" + ] + }, + "GetJob": { + "methods": [ + "getJob" + ] + }, + "UpdateJob": { + "methods": [ + "updateJob" + ] + }, + "CancelJob": { + "methods": [ + "cancelJob" + ] + }, + "DeleteJob": { + "methods": [ + "deleteJob" + ] + }, + "SubmitJobAsOperation": { + "methods": [ + "submitJobAsOperation" + ] + }, + "ListJobs": { + "methods": [ + "listJobs", + "listJobsStream", + "listJobsAsync" + ] + } + } + } + } + }, + 
"NodeGroupController": { + "clients": { + "grpc": { + "libraryClient": "NodeGroupControllerClient", + "rpcs": { + "GetNodeGroup": { + "methods": [ + "getNodeGroup" + ] + }, + "CreateNodeGroup": { + "methods": [ + "createNodeGroup" + ] + }, + "ResizeNodeGroup": { + "methods": [ + "resizeNodeGroup" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "NodeGroupControllerClient", + "rpcs": { + "GetNodeGroup": { + "methods": [ + "getNodeGroup" + ] + }, + "CreateNodeGroup": { + "methods": [ + "createNodeGroup" + ] + }, + "ResizeNodeGroup": { + "methods": [ + "resizeNodeGroup" + ] + } + } + } + } + }, + "WorkflowTemplateService": { + "clients": { + "grpc": { + "libraryClient": "WorkflowTemplateServiceClient", + "rpcs": { + "CreateWorkflowTemplate": { + "methods": [ + "createWorkflowTemplate" + ] + }, + "GetWorkflowTemplate": { + "methods": [ + "getWorkflowTemplate" + ] + }, + "UpdateWorkflowTemplate": { + "methods": [ + "updateWorkflowTemplate" + ] + }, + "DeleteWorkflowTemplate": { + "methods": [ + "deleteWorkflowTemplate" + ] + }, + "InstantiateWorkflowTemplate": { + "methods": [ + "instantiateWorkflowTemplate" + ] + }, + "InstantiateInlineWorkflowTemplate": { + "methods": [ + "instantiateInlineWorkflowTemplate" + ] + }, + "ListWorkflowTemplates": { + "methods": [ + "listWorkflowTemplates", + "listWorkflowTemplatesStream", + "listWorkflowTemplatesAsync" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "WorkflowTemplateServiceClient", + "rpcs": { + "CreateWorkflowTemplate": { + "methods": [ + "createWorkflowTemplate" + ] + }, + "GetWorkflowTemplate": { + "methods": [ + "getWorkflowTemplate" + ] + }, + "UpdateWorkflowTemplate": { + "methods": [ + "updateWorkflowTemplate" + ] + }, + "DeleteWorkflowTemplate": { + "methods": [ + "deleteWorkflowTemplate" + ] + }, + "InstantiateWorkflowTemplate": { + "methods": [ + "instantiateWorkflowTemplate" + ] + }, + "InstantiateInlineWorkflowTemplate": { + "methods": [ + "instantiateInlineWorkflowTemplate" + ] + }, + 
"ListWorkflowTemplates": { + "methods": [ + "listWorkflowTemplates", + "listWorkflowTemplatesStream", + "listWorkflowTemplatesAsync" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/index.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/index.ts new file mode 100644 index 00000000000..10f41804708 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/index.ts @@ -0,0 +1,24 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +export {AutoscalingPolicyServiceClient} from './autoscaling_policy_service_client'; +export {BatchControllerClient} from './batch_controller_client'; +export {ClusterControllerClient} from './cluster_controller_client'; +export {JobControllerClient} from './job_controller_client'; +export {NodeGroupControllerClient} from './node_group_controller_client'; +export {WorkflowTemplateServiceClient} from './workflow_template_service_client'; diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client.ts new file mode 100644 index 00000000000..cf8f723808b --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client.ts @@ -0,0 +1,1465 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1/job_controller_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './job_controller_client_config.json'; +const version = require('../../../package.json').version; + +/** + * The JobController provides methods to manage jobs. + * @class + * @memberof v1 + */ +export class JobControllerClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + pathTemplates: {[name: string]: gax.PathTemplate}; + operationsClient: gax.OperationsClient; + jobControllerStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of JobControllerClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. 
+ * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. 
Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new JobControllerClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof JobControllerClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // Request numeric enum values if REST transport is used. + opts.numericEnums = true; + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. 
+ this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. 
+ this.pathTemplates = { + batchPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/batches/{batch}' + ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), + projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' + ), + projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' + ), + projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' + ), + projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' + ), + }; + + // Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listJobs: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'jobs') + }; + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [{selector: 'google.iam.v1.IAMPolicy.GetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy',body: '*',}], + },{selector: 'google.iam.v1.IAMPolicy.SetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy',body: '*',}], + },{selector: 'google.iam.v1.IAMPolicy.TestIamPermissions',post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: 
'/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions',body: '*',}], + },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/regions/*/operations/*}:cancel',},{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/regions/*/operations}',}]; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const submitJobAsOperationResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.Job') as gax.protobuf.Type; + const submitJobAsOperationMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.JobMetadata') as gax.protobuf.Type; + + this.descriptors.longrunning = { + submitJobAsOperation: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + submitJobAsOperationResponse.decode.bind(submitJobAsOperationResponse), + submitJobAsOperationMetadata.decode.bind(submitJobAsOperationMetadata)) + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.dataproc.v1.JobController', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. 
+ this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.jobControllerStub) { + return this.jobControllerStub; + } + + // Put together the "service stub" for + // google.cloud.dataproc.v1.JobController. + this.jobControllerStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.JobController') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.dataproc.v1.JobController, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+ const jobControllerStubMethods = + ['submitJob', 'submitJobAsOperation', 'getJob', 'listJobs', 'updateJob', 'cancelJob', 'deleteJob']; + for (const methodName of jobControllerStubMethods) { + const callPromise = this.jobControllerStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + this.descriptors.longrunning[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.jobControllerStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'dataproc.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'dataproc.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. 
+ */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- +/** + * Submits a job to a cluster. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {google.cloud.dataproc.v1.Job} request.job + * Required. The job resource. + * @param {string} [request.requestId] + * Optional. A unique id used to identify the request. If the server + * receives two + * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s + * with the same id, then the second request will be ignored and the + * first {@link google.cloud.dataproc.v1.Job|Job} created and stored in the backend + * is returned. + * + * It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The id must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Job]{@link google.cloud.dataproc.v1.Job}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/job_controller.submit_job.js + * region_tag:dataproc_v1_generated_JobController_SubmitJob_async + */ + submitJob( + request?: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ISubmitJobRequest|undefined, {}|undefined + ]>; + submitJob( + request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ISubmitJobRequest|null|undefined, + {}|null|undefined>): void; + submitJob( + request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ISubmitJobRequest|null|undefined, + {}|null|undefined>): void; + submitJob( + request?: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ISubmitJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ISubmitJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ISubmitJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.submitJob(request, options, callback); + } +/** + * Gets the resource representation for a job in a project. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} request.jobId + * Required. The job ID. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Job]{@link google.cloud.dataproc.v1.Job}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/job_controller.get_job.js + * region_tag:dataproc_v1_generated_JobController_GetJob_async + */ + getJob( + request?: protos.google.cloud.dataproc.v1.IGetJobRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IGetJobRequest|undefined, {}|undefined + ]>; + getJob( + request: protos.google.cloud.dataproc.v1.IGetJobRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IGetJobRequest|null|undefined, + {}|null|undefined>): void; + getJob( + request: protos.google.cloud.dataproc.v1.IGetJobRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IGetJobRequest|null|undefined, + {}|null|undefined>): void; + getJob( + request?: protos.google.cloud.dataproc.v1.IGetJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IGetJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IGetJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IGetJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + 'job_id': request.jobId ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.getJob(request, options, callback); + } +/** + * Updates a job in a project. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} request.jobId + * Required. The job ID. + * @param {google.cloud.dataproc.v1.Job} request.job + * Required. The changes to the job. + * @param {google.protobuf.FieldMask} request.updateMask + * Required. Specifies the path, relative to Job, of + * the field to update. For example, to update the labels of a Job the + * update_mask parameter would be specified as + * labels, and the `PATCH` request body would specify the new + * value. Note: Currently, labels is the only + * field that can be updated. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Job]{@link google.cloud.dataproc.v1.Job}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/job_controller.update_job.js + * region_tag:dataproc_v1_generated_JobController_UpdateJob_async + */ + updateJob( + request?: protos.google.cloud.dataproc.v1.IUpdateJobRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IUpdateJobRequest|undefined, {}|undefined + ]>; + updateJob( + request: protos.google.cloud.dataproc.v1.IUpdateJobRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IUpdateJobRequest|null|undefined, + {}|null|undefined>): void; + updateJob( + request: protos.google.cloud.dataproc.v1.IUpdateJobRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IUpdateJobRequest|null|undefined, + {}|null|undefined>): void; + updateJob( + request?: protos.google.cloud.dataproc.v1.IUpdateJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IUpdateJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IUpdateJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IUpdateJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? 
'', + 'job_id': request.jobId ?? '', + }); + this.initialize(); + return this.innerApiCalls.updateJob(request, options, callback); + } +/** + * Starts a job cancellation request. To access the job resource + * after cancellation, call + * [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) + * or + * [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} request.jobId + * Required. The job ID. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Job]{@link google.cloud.dataproc.v1.Job}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/job_controller.cancel_job.js + * region_tag:dataproc_v1_generated_JobController_CancelJob_async + */ + cancelJob( + request?: protos.google.cloud.dataproc.v1.ICancelJobRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ICancelJobRequest|undefined, {}|undefined + ]>; + cancelJob( + request: protos.google.cloud.dataproc.v1.ICancelJobRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ICancelJobRequest|null|undefined, + {}|null|undefined>): void; + cancelJob( + request: protos.google.cloud.dataproc.v1.ICancelJobRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ICancelJobRequest|null|undefined, + {}|null|undefined>): void; + cancelJob( + request?: protos.google.cloud.dataproc.v1.ICancelJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ICancelJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ICancelJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.ICancelJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? 
'', + 'job_id': request.jobId ?? '', + }); + this.initialize(); + return this.innerApiCalls.cancelJob(request, options, callback); + } +/** + * Deletes the job from the project. If the job is active, the delete fails, + * and the response returns `FAILED_PRECONDITION`. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {string} request.jobId + * Required. The job ID. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/job_controller.delete_job.js + * region_tag:dataproc_v1_generated_JobController_DeleteJob_async + */ + deleteJob( + request?: protos.google.cloud.dataproc.v1.IDeleteJobRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteJobRequest|undefined, {}|undefined + ]>; + deleteJob( + request: protos.google.cloud.dataproc.v1.IDeleteJobRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteJobRequest|null|undefined, + {}|null|undefined>): void; + deleteJob( + request: protos.google.cloud.dataproc.v1.IDeleteJobRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteJobRequest|null|undefined, + {}|null|undefined>): void; + deleteJob( + request?: protos.google.cloud.dataproc.v1.IDeleteJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + 'job_id': request.jobId ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.deleteJob(request, options, callback); + } + +/** + * Submits a job to a cluster. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {google.cloud.dataproc.v1.Job} request.job + * Required. The job resource. + * @param {string} [request.requestId] + * Optional. A unique id used to identify the request. If the server + * receives two + * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s + * with the same id, then the second request will be ignored and the + * first {@link google.cloud.dataproc.v1.Job|Job} created and stored in the backend + * is returned. + * + * It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The id must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/job_controller.submit_job_as_operation.js + * region_tag:dataproc_v1_generated_JobController_SubmitJobAsOperation_async + */ + submitJobAsOperation( + request?: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + submitJobAsOperation( + request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + submitJobAsOperation( + request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + submitJobAsOperation( + request?: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.submitJobAsOperation(request, options, callback); + } +/** + * Check the status of the long running operation returned by `submitJobAsOperation()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/job_controller.submit_job_as_operation.js + * region_tag:dataproc_v1_generated_JobController_SubmitJobAsOperation_async + */ + async checkSubmitJobAsOperationProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.submitJobAsOperation, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } + /** + * Lists regions/{region}/jobs in a project. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {number} [request.pageSize] + * Optional. The number of results to return in each response. + * @param {string} [request.pageToken] + * Optional. The page token, returned by a previous call, to request the + * next page of results. + * @param {string} [request.clusterName] + * Optional. If set, the returned jobs list includes only jobs that were + * submitted to the named cluster. 
+ * @param {google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher} [request.jobStateMatcher] + * Optional. Specifies enumerated categories of jobs to list. + * (default = match ALL jobs). + * + * If `filter` is provided, `jobStateMatcher` will be ignored. + * @param {string} [request.filter] + * Optional. A filter constraining the jobs to list. Filters are + * case-sensitive and have the following syntax: + * + * [field = value] AND [field [= value]] ... + * + * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label + * key. **value** can be `*` to match all values. + * `status.state` can be either `ACTIVE` or `NON_ACTIVE`. + * Only the logical `AND` operator is supported; space-separated items are + * treated as having an implicit `AND` operator. + * + * Example filter: + * + * status.state = ACTIVE AND labels.env = staging AND labels.starred = * + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of [Job]{@link google.cloud.dataproc.v1.Job}. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. + * We recommend using `listJobsAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. 
+ */ + listJobs( + request?: protos.google.cloud.dataproc.v1.IListJobsRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IJob[], + protos.google.cloud.dataproc.v1.IListJobsRequest|null, + protos.google.cloud.dataproc.v1.IListJobsResponse + ]>; + listJobs( + request: protos.google.cloud.dataproc.v1.IListJobsRequest, + options: CallOptions, + callback: PaginationCallback< + protos.google.cloud.dataproc.v1.IListJobsRequest, + protos.google.cloud.dataproc.v1.IListJobsResponse|null|undefined, + protos.google.cloud.dataproc.v1.IJob>): void; + listJobs( + request: protos.google.cloud.dataproc.v1.IListJobsRequest, + callback: PaginationCallback< + protos.google.cloud.dataproc.v1.IListJobsRequest, + protos.google.cloud.dataproc.v1.IListJobsResponse|null|undefined, + protos.google.cloud.dataproc.v1.IJob>): void; + listJobs( + request?: protos.google.cloud.dataproc.v1.IListJobsRequest, + optionsOrCallback?: CallOptions|PaginationCallback< + protos.google.cloud.dataproc.v1.IListJobsRequest, + protos.google.cloud.dataproc.v1.IListJobsResponse|null|undefined, + protos.google.cloud.dataproc.v1.IJob>, + callback?: PaginationCallback< + protos.google.cloud.dataproc.v1.IListJobsRequest, + protos.google.cloud.dataproc.v1.IListJobsResponse|null|undefined, + protos.google.cloud.dataproc.v1.IJob>): + Promise<[ + protos.google.cloud.dataproc.v1.IJob[], + protos.google.cloud.dataproc.v1.IListJobsRequest|null, + protos.google.cloud.dataproc.v1.IListJobsResponse + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 
'project_id': request.projectId ?? '', + 'region': request.region ?? '', + }); + this.initialize(); + return this.innerApiCalls.listJobs(request, options, callback); + } + +/** + * Equivalent to `listJobs`, but returns a NodeJS Stream object. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {number} [request.pageSize] + * Optional. The number of results to return in each response. + * @param {string} [request.pageToken] + * Optional. The page token, returned by a previous call, to request the + * next page of results. + * @param {string} [request.clusterName] + * Optional. If set, the returned jobs list includes only jobs that were + * submitted to the named cluster. + * @param {google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher} [request.jobStateMatcher] + * Optional. Specifies enumerated categories of jobs to list. + * (default = match ALL jobs). + * + * If `filter` is provided, `jobStateMatcher` will be ignored. + * @param {string} [request.filter] + * Optional. A filter constraining the jobs to list. Filters are + * case-sensitive and have the following syntax: + * + * [field = value] AND [field [= value]] ... + * + * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label + * key. **value** can be `*` to match all values. + * `status.state` can be either `ACTIVE` or `NON_ACTIVE`. + * Only the logical `AND` operator is supported; space-separated items are + * treated as having an implicit `AND` operator. + * + * Example filter: + * + * status.state = ACTIVE AND labels.env = staging AND labels.starred = * + * @param {object} [options] + * Call options. 
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits an object representing [Job]{@link google.cloud.dataproc.v1.Job} on 'data' event. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed. Note that it can affect your quota. + * We recommend using `listJobsAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. + */ + listJobsStream( + request?: protos.google.cloud.dataproc.v1.IListJobsRequest, + options?: CallOptions): + Transform{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + }); + const defaultCallSettings = this._defaults['listJobs']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listJobs.createStream( + this.innerApiCalls.listJobs as GaxCall, + request, + callSettings + ); + } + +/** + * Equivalent to `listJobs`, but returns an iterable object. + * + * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {number} [request.pageSize] + * Optional. The number of results to return in each response. 
+ * @param {string} [request.pageToken] + * Optional. The page token, returned by a previous call, to request the + * next page of results. + * @param {string} [request.clusterName] + * Optional. If set, the returned jobs list includes only jobs that were + * submitted to the named cluster. + * @param {google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher} [request.jobStateMatcher] + * Optional. Specifies enumerated categories of jobs to list. + * (default = match ALL jobs). + * + * If `filter` is provided, `jobStateMatcher` will be ignored. + * @param {string} [request.filter] + * Optional. A filter constraining the jobs to list. Filters are + * case-sensitive and have the following syntax: + * + * [field = value] AND [field [= value]] ... + * + * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label + * key. **value** can be `*` to match all values. + * `status.state` can be either `ACTIVE` or `NON_ACTIVE`. + * Only the logical `AND` operator is supported; space-separated items are + * treated as having an implicit `AND` operator. + * + * Example filter: + * + * status.state = ACTIVE AND labels.env = staging AND labels.starred = * + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Object} + * An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols). + * When you iterate the returned iterable, each element will be an object representing + * [Job]{@link google.cloud.dataproc.v1.Job}. The API will be called under the hood as needed, once per the page, + * so you can stop the iteration when you don't need more results. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. 
+ * @example include:samples/generated/v1/job_controller.list_jobs.js + * region_tag:dataproc_v1_generated_JobController_ListJobs_async + */ + listJobsAsync( + request?: protos.google.cloud.dataproc.v1.IListJobsRequest, + options?: CallOptions): + AsyncIterable{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'region': request.region ?? '', + }); + const defaultCallSettings = this._defaults['listJobs']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listJobs.asyncIterate( + this.innerApiCalls['listJobs'] as GaxCall, + request as {}, + callSettings + ) as AsyncIterable; + } + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified batch resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} batch + * @returns {string} Resource name string. + */ + batchPath(project:string,location:string,batch:string) { + return this.pathTemplates.batchPathTemplate.render({ + project: project, + location: location, + batch: batch, + }); + } + + /** + * Parse the project from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the project. + */ + matchProjectFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).project; + } + + /** + * Parse the location from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the location. 
+ */ + matchLocationFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).location; + } + + /** + * Parse the batch from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the batch. + */ + matchBatchFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).batch; + } + + /** + * Return a fully-qualified nodeGroup resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. + */ + nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. + */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; + } + + /** + * Parse the region from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the cluster. 
+ */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; + } + + /** + * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ + project: project, + location: location, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; + } + + /** + * Parse the location from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the location. 
+ */ + matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; + } + + /** + * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectLocationWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ + project: project, + location: location, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; + } + + /** + * Parse the location from ProjectLocationWorkflowTemplate resource. 
+ * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the location. + */ + matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; + } + + /** + * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; + } + + /** + * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ + project: project, + region: region, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the project. 
+ */ + matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; + } + + /** + * Parse the region from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; + } + + /** + * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectRegionWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ + project: project, + region: region, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. 
+ * @returns {string} A string representing the project. + */ + matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; + } + + /** + * Parse the region from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; + } + + /** + * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+ */ + close(): Promise { + if (this.jobControllerStub && !this._terminated) { + return this.jobControllerStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client_config.json new file mode 100644 index 00000000000..5d75711034e --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client_config.json @@ -0,0 +1,69 @@ +{ + "interfaces": { + "google.cloud.dataproc.v1.JobController": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ], + "unavailable": [ + "UNAVAILABLE" + ], + "deadline_exceeded_internal_unavailable": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "SubmitJob": { + "timeout_millis": 900000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "SubmitJobAsOperation": { + "timeout_millis": 900000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "GetJob": { + "timeout_millis": 900000, + "retry_codes_name": "deadline_exceeded_internal_unavailable", + "retry_params_name": "default" + }, + "ListJobs": { + "timeout_millis": 900000, + "retry_codes_name": "deadline_exceeded_internal_unavailable", + "retry_params_name": "default" + }, + "UpdateJob": { + "timeout_millis": 900000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "CancelJob": { + "timeout_millis": 900000, + "retry_codes_name": "deadline_exceeded_internal_unavailable", + 
"retry_params_name": "default" + }, + "DeleteJob": { + "timeout_millis": 900000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_proto_list.json new file mode 100644 index 00000000000..3bb7ccf055a --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_proto_list.json @@ -0,0 +1,10 @@ +[ + "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", + "../../protos/google/cloud/dataproc/v1/batches.proto", + "../../protos/google/cloud/dataproc/v1/clusters.proto", + "../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", + "../../protos/google/cloud/dataproc/v1/operations.proto", + "../../protos/google/cloud/dataproc/v1/shared.proto", + "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" +] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client.ts new file mode 100644 index 00000000000..adf45e861d8 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client.ts @@ -0,0 +1,1113 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1/node_group_controller_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './node_group_controller_client_config.json'; +const version = require('../../../package.json').version; + +/** + * The `NodeGroupControllerService` provides methods to manage node groups + * of Compute Engine managed instances. + * @class + * @memberof v1 + */ +export class NodeGroupControllerClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + pathTemplates: {[name: string]: gax.PathTemplate}; + operationsClient: gax.OperationsClient; + nodeGroupControllerStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of NodeGroupControllerClient. + * + * @param {object} [options] - The configuration object. 
+ * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. 
Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new NodeGroupControllerClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof NodeGroupControllerClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // Request numeric enum values if REST transport is used. + opts.numericEnums = true; + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. 
+ this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. 
+ this.pathTemplates = { + batchPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/batches/{batch}' + ), + clusterRegionPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}' + ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), + projectPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}' + ), + projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' + ), + projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' + ), + projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' + ), + projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' + ), + regionPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}' + ), + }; + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [{selector: 'google.iam.v1.IAMPolicy.GetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy',body: '*',}], + },{selector: 'google.iam.v1.IAMPolicy.SetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy',body: '*',}], + },{selector: 'google.iam.v1.IAMPolicy.TestIamPermissions',post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: 
'/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions',body: '*',}], + },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/regions/*/operations/*}:cancel',},{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/regions/*/operations}',}]; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const createNodeGroupResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.NodeGroup') as gax.protobuf.Type; + const createNodeGroupMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.NodeGroupOperationMetadata') as gax.protobuf.Type; + const resizeNodeGroupResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.NodeGroup') as gax.protobuf.Type; + const resizeNodeGroupMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.NodeGroupOperationMetadata') as gax.protobuf.Type; + + this.descriptors.longrunning = { + createNodeGroup: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + createNodeGroupResponse.decode.bind(createNodeGroupResponse), + createNodeGroupMetadata.decode.bind(createNodeGroupMetadata)), + resizeNodeGroup: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + resizeNodeGroupResponse.decode.bind(resizeNodeGroupResponse), + resizeNodeGroupMetadata.decode.bind(resizeNodeGroupMetadata)) + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.dataproc.v1.NodeGroupController', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.nodeGroupControllerStub) { + return this.nodeGroupControllerStub; + } + + // Put together the "service stub" for + // google.cloud.dataproc.v1.NodeGroupController. + this.nodeGroupControllerStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.NodeGroupController') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.dataproc.v1.NodeGroupController, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+ const nodeGroupControllerStubMethods = + ['createNodeGroup', 'resizeNodeGroup', 'getNodeGroup']; + for (const methodName of nodeGroupControllerStubMethods) { + const callPromise = this.nodeGroupControllerStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.longrunning[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.nodeGroupControllerStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'dataproc.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'dataproc.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. 
+ */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- +/** + * Gets the resource representation for a node group in a + * cluster. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The name of the node group to retrieve. + * Format: + * `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}` + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [NodeGroup]{@link google.cloud.dataproc.v1.NodeGroup}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/node_group_controller.get_node_group.js + * region_tag:dataproc_v1_generated_NodeGroupController_GetNodeGroup_async + */ + getNodeGroup( + request?: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|undefined, {}|undefined + ]>; + getNodeGroup( + request: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|null|undefined, + {}|null|undefined>): void; + getNodeGroup( + request: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|null|undefined, + {}|null|undefined>): void; + getNodeGroup( + request?: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 
'name': request.name ?? '', + }); + this.initialize(); + return this.innerApiCalls.getNodeGroup(request, options, callback); + } + +/** + * Creates a node group in a cluster. The returned + * {@link google.longrunning.Operation.metadata|Operation.metadata} is + * [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The parent resource where this node group will be created. + * Format: `projects/{project}/regions/{region}/clusters/{cluster}` + * @param {google.cloud.dataproc.v1.NodeGroup} request.nodeGroup + * Required. The node group to create. + * @param {string} [request.nodeGroupId] + * Optional. An optional node group ID. Generated if not specified. + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). Cannot begin or end with underscore + * or hyphen. Must consist of from 3 to 33 characters. + * @param {string} [request.requestId] + * Optional. A unique ID used to identify the request. If the server receives + * two + * [CreateNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequests) + * with the same ID, the second request is ignored and the + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. + * + * Recommendation: Set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. 
+ * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/node_group_controller.create_node_group.js + * region_tag:dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async + */ + createNodeGroup( + request?: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, + options?: CallOptions): + Promise<[ + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + createNodeGroup( + request: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, + options: CallOptions, + callback: Callback< + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + createNodeGroup( + request: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, + callback: Callback< + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + createNodeGroup( + request?: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = 
options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + this.initialize(); + return this.innerApiCalls.createNodeGroup(request, options, callback); + } +/** + * Check the status of the long running operation returned by `createNodeGroup()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/node_group_controller.create_node_group.js + * region_tag:dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async + */ + async checkCreateNodeGroupProgress(name: string): Promise<LROperation<protos.google.cloud.dataproc.v1.NodeGroup, protos.google.cloud.dataproc.v1.NodeGroupOperationMetadata>>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.createNodeGroup, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation<protos.google.cloud.dataproc.v1.NodeGroup, protos.google.cloud.dataproc.v1.NodeGroupOperationMetadata>; + } +/** + * Resizes a node group in a cluster. The returned + * {@link google.longrunning.Operation.metadata|Operation.metadata} is + * [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The name of the node group to resize. + * Format: + * `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}` + * @param {number} request.size + * Required. 
The number of running instances for the node group to maintain. + * The group adds or removes instances to maintain the number of instances + * specified by this parameter. + * @param {string} [request.requestId] + * Optional. A unique ID used to identify the request. If the server receives + * two + * [ResizeNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests) + * with the same ID, the second request is ignored and the + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. + * + * Recommendation: Set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {google.protobuf.Duration} [request.gracefulDecommissionTimeout] + * Optional. Timeout for graceful YARN decommissioning. [Graceful + * decommissioning] + * (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) + * allows the removal of nodes from the Compute Engine node group + * without interrupting jobs in progress. This timeout specifies how long to + * wait for jobs in progress to finish before forcefully removing nodes (and + * potentially interrupting jobs). Default timeout is 0 (for forceful + * decommission), and the maximum allowed timeout is 1 day. (see JSON + * representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + * + * Only supported on Dataproc image versions 1.2 and higher. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. 
+ * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/node_group_controller.resize_node_group.js + * region_tag:dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async + */ + resizeNodeGroup( + request?: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, + options?: CallOptions): + Promise<[ + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + resizeNodeGroup( + request: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, + options: CallOptions, + callback: Callback< + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + resizeNodeGroup( + request: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, + callback: Callback< + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + resizeNodeGroup( + request?: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation<protos.google.cloud.dataproc.v1.INodeGroup, protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata>, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 
'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'name': request.name ?? '', + }); + this.initialize(); + return this.innerApiCalls.resizeNodeGroup(request, options, callback); + } +/** + * Check the status of the long running operation returned by `resizeNodeGroup()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/node_group_controller.resize_node_group.js + * region_tag:dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async + */ + async checkResizeNodeGroupProgress(name: string): Promise<LROperation<protos.google.cloud.dataproc.v1.NodeGroup, protos.google.cloud.dataproc.v1.NodeGroupOperationMetadata>>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.resizeNodeGroup, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation<protos.google.cloud.dataproc.v1.NodeGroup, protos.google.cloud.dataproc.v1.NodeGroupOperationMetadata>; + } + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified batch resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} batch + * @returns {string} Resource name string. + */ + batchPath(project:string,location:string,batch:string) { + return this.pathTemplates.batchPathTemplate.render({ + project: project, + location: location, + batch: batch, + }); + } + + /** + * Parse the project from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the project. 
+ */ + matchProjectFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).project; + } + + /** + * Parse the location from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the location. + */ + matchLocationFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).location; + } + + /** + * Parse the batch from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the batch. + */ + matchBatchFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).batch; + } + + /** + * Return a fully-qualified clusterRegion resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @returns {string} Resource name string. + */ + clusterRegionPath(project:string,region:string,cluster:string) { + return this.pathTemplates.clusterRegionPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + }); + } + + /** + * Parse the project from ClusterRegion resource. + * + * @param {string} clusterRegionName + * A fully-qualified path representing ClusterRegion resource. + * @returns {string} A string representing the project. + */ + matchProjectFromClusterRegionName(clusterRegionName: string) { + return this.pathTemplates.clusterRegionPathTemplate.match(clusterRegionName).project; + } + + /** + * Parse the region from ClusterRegion resource. + * + * @param {string} clusterRegionName + * A fully-qualified path representing ClusterRegion resource. + * @returns {string} A string representing the region. 
+ */ + matchRegionFromClusterRegionName(clusterRegionName: string) { + return this.pathTemplates.clusterRegionPathTemplate.match(clusterRegionName).region; + } + + /** + * Parse the cluster from ClusterRegion resource. + * + * @param {string} clusterRegionName + * A fully-qualified path representing ClusterRegion resource. + * @returns {string} A string representing the cluster. + */ + matchClusterFromClusterRegionName(clusterRegionName: string) { + return this.pathTemplates.clusterRegionPathTemplate.match(clusterRegionName).cluster; + } + + /** + * Return a fully-qualified nodeGroup resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. + */ + nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. + */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; + } + + /** + * Parse the region from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the cluster. 
+ */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; + } + + /** + * Return a fully-qualified project resource name string. + * + * @param {string} project + * @returns {string} Resource name string. + */ + projectPath(project:string) { + return this.pathTemplates.projectPathTemplate.render({ + project: project, + }); + } + + /** + * Parse the project from Project resource. + * + * @param {string} projectName + * A fully-qualified path representing Project resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectName(projectName: string) { + return this.pathTemplates.projectPathTemplate.match(projectName).project; + } + + /** + * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ + project: project, + location: location, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the project. 
+ */ + matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; + } + + /** + * Parse the location from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the location. + */ + matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; + } + + /** + * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectLocationWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ + project: project, + location: location, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectLocationWorkflowTemplate resource. 
+ * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; + } + + /** + * Parse the location from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the location. + */ + matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; + } + + /** + * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; + } + + /** + * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} autoscaling_policy + * @returns {string} Resource name string. 
+ */ + projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ + project: project, + region: region, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; + } + + /** + * Parse the region from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; + } + + /** + * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectRegionWorkflowTemplate resource name string. 
+ * + * @param {string} project + * @param {string} region + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ + project: project, + region: region, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; + } + + /** + * Parse the region from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; + } + + /** + * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the workflow_template. 
+ */ + matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; + } + + /** + * Return a fully-qualified region resource name string. + * + * @param {string} project + * @param {string} region + * @returns {string} Resource name string. + */ + regionPath(project:string,region:string) { + return this.pathTemplates.regionPathTemplate.render({ + project: project, + region: region, + }); + } + + /** + * Parse the project from Region resource. + * + * @param {string} regionName + * A fully-qualified path representing Region resource. + * @returns {string} A string representing the project. + */ + matchProjectFromRegionName(regionName: string) { + return this.pathTemplates.regionPathTemplate.match(regionName).project; + } + + /** + * Parse the region from Region resource. + * + * @param {string} regionName + * A fully-qualified path representing Region resource. + * @returns {string} A string representing the region. + */ + matchRegionFromRegionName(regionName: string) { + return this.pathTemplates.regionPathTemplate.match(regionName).region; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+ */ + close(): Promise<void> { + if (this.nodeGroupControllerStub && !this._terminated) { + return this.nodeGroupControllerStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client_config.json new file mode 100644 index 00000000000..3e7761ab670 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client_config.json @@ -0,0 +1,38 @@ +{ + "interfaces": { + "google.cloud.dataproc.v1.NodeGroupController": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "CreateNodeGroup": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "ResizeNodeGroup": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "GetNodeGroup": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_proto_list.json new file mode 100644 index 00000000000..3bb7ccf055a --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_proto_list.json @@ -0,0 +1,10 @@ +[ + "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", + "../../protos/google/cloud/dataproc/v1/batches.proto", + "../../protos/google/cloud/dataproc/v1/clusters.proto", + 
"../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", + "../../protos/google/cloud/dataproc/v1/operations.proto", + "../../protos/google/cloud/dataproc/v1/shared.proto", + "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" +] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client.ts new file mode 100644 index 00000000000..ab191a6ed7d --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client.ts @@ -0,0 +1,1561 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1/workflow_template_service_client_config.json`. 
+ * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './workflow_template_service_client_config.json'; +const version = require('../../../package.json').version; + +/** + * The API interface for managing Workflow Templates in the + * Dataproc API. + * @class + * @memberof v1 + */ +export class WorkflowTemplateServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + pathTemplates: {[name: string]: gax.PathTemplate}; + operationsClient: gax.OperationsClient; + workflowTemplateServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of WorkflowTemplateServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. 
+ * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new WorkflowTemplateServiceClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. 
+ const staticMembers = this.constructor as typeof WorkflowTemplateServiceClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // Request numeric enum values if REST transport is used. + opts.numericEnums = true; + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. 
+ const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. + this.pathTemplates = { + batchPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/batches/{batch}' + ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), + projectPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}' + ), + projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' + ), + projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' + ), + projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' + ), + projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' + ), + regionPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}' + ), + }; + + // 
Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listWorkflowTemplates: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'templates') + }; + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [{selector: 'google.iam.v1.IAMPolicy.GetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy',body: '*',}], + },{selector: 'google.iam.v1.IAMPolicy.SetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: 
'/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy',body: '*',}], + },{selector: 'google.iam.v1.IAMPolicy.TestIamPermissions',post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions',body: '*',}], + },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/regions/*/operations/*}:cancel',},{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/regions/*/operations}',}]; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const instantiateWorkflowTemplateResponse = protoFilesRoot.lookup( + '.google.protobuf.Empty') as gax.protobuf.Type; + const instantiateWorkflowTemplateMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.WorkflowMetadata') as gax.protobuf.Type; + const instantiateInlineWorkflowTemplateResponse = protoFilesRoot.lookup( + '.google.protobuf.Empty') as gax.protobuf.Type; + const instantiateInlineWorkflowTemplateMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.WorkflowMetadata') as gax.protobuf.Type; + + 
this.descriptors.longrunning = { + instantiateWorkflowTemplate: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + instantiateWorkflowTemplateResponse.decode.bind(instantiateWorkflowTemplateResponse), + instantiateWorkflowTemplateMetadata.decode.bind(instantiateWorkflowTemplateMetadata)), + instantiateInlineWorkflowTemplate: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + instantiateInlineWorkflowTemplateResponse.decode.bind(instantiateInlineWorkflowTemplateResponse), + instantiateInlineWorkflowTemplateMetadata.decode.bind(instantiateInlineWorkflowTemplateMetadata)) + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.dataproc.v1.WorkflowTemplateService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. 
+ if (this.workflowTemplateServiceStub) { + return this.workflowTemplateServiceStub; + } + + // Put together the "service stub" for + // google.cloud.dataproc.v1.WorkflowTemplateService. + this.workflowTemplateServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.WorkflowTemplateService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.dataproc.v1.WorkflowTemplateService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const workflowTemplateServiceStubMethods = + ['createWorkflowTemplate', 'getWorkflowTemplate', 'instantiateWorkflowTemplate', 'instantiateInlineWorkflowTemplate', 'updateWorkflowTemplate', 'listWorkflowTemplates', 'deleteWorkflowTemplate']; + for (const methodName of workflowTemplateServiceStubMethods) { + const callPromise = this.workflowTemplateServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + this.descriptors.longrunning[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.workflowTemplateServiceStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'dataproc.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. 
+   * @returns {string} The DNS address for this service.
+   */
+  static get apiEndpoint() {
+    return 'dataproc.googleapis.com';
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/cloud-platform'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * Creates new workflow template.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.parent
+ *   Required. The resource name of the region or location, as described
+ *   in https://cloud.google.com/apis/design/resource_names.
+ *
+ *   * For `projects.regions.workflowTemplates.create`, the resource name of the
+ *     region has the following format:
+ *     `projects/{project_id}/regions/{region}`
+ *
+ *   * For `projects.locations.workflowTemplates.create`, the resource name of
+ *     the location has the following format:
+ *     `projects/{project_id}/locations/{location}`
+ * @param {google.cloud.dataproc.v1.WorkflowTemplate} request.template
+ *   Required. The Dataproc workflow template to create.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. + * @example include:samples/generated/v1/workflow_template_service.create_workflow_template.js + * region_tag:dataproc_v1_generated_WorkflowTemplateService_CreateWorkflowTemplate_async + */ + createWorkflowTemplate( + request?: protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|undefined, {}|undefined + ]>; + createWorkflowTemplate( + request: protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): void; + createWorkflowTemplate( + request: protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): void; + createWorkflowTemplate( + request?: protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|undefined, 
{}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + this.initialize(); + return this.innerApiCalls.createWorkflowTemplate(request, options, callback); + } +/** + * Retrieves the latest workflow template. + * + * Can retrieve previously instantiated template by specifying optional + * version parameter. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The resource name of the workflow template, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.workflowTemplates.get`, the resource name of the + * template has the following format: + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * + * * For `projects.locations.workflowTemplates.get`, the resource name of the + * template has the following format: + * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + * @param {number} [request.version] + * Optional. The version of workflow template to retrieve. Only previously + * instantiated versions can be retrieved. + * + * If unspecified, retrieves the current version. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. 
+ * The first element of the array is an object representing [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. + * @example include:samples/generated/v1/workflow_template_service.get_workflow_template.js + * region_tag:dataproc_v1_generated_WorkflowTemplateService_GetWorkflowTemplate_async + */ + getWorkflowTemplate( + request?: protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|undefined, {}|undefined + ]>; + getWorkflowTemplate( + request: protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): void; + getWorkflowTemplate( + request: protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): void; + getWorkflowTemplate( + request?: protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + 
let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'name': request.name ?? '', + }); + this.initialize(); + return this.innerApiCalls.getWorkflowTemplate(request, options, callback); + } +/** + * Updates (replaces) workflow template. The updated template + * must contain version that matches the current server version. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.dataproc.v1.WorkflowTemplate} request.template + * Required. The updated workflow template. + * + * The `template.version` field must match the current version. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/workflow_template_service.update_workflow_template.js + * region_tag:dataproc_v1_generated_WorkflowTemplateService_UpdateWorkflowTemplate_async + */ + updateWorkflowTemplate( + request?: protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|undefined, {}|undefined + ]>; + updateWorkflowTemplate( + request: protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): void; + updateWorkflowTemplate( + request: protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): void; + updateWorkflowTemplate( + request?: protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.dataproc.v1.IWorkflowTemplate, + protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + 
options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'template.name': request.template!.name ?? '', + }); + this.initialize(); + return this.innerApiCalls.updateWorkflowTemplate(request, options, callback); + } +/** + * Deletes a workflow template. It does not cancel in-progress workflows. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The resource name of the workflow template, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.workflowTemplates.delete`, the resource name + * of the template has the following format: + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * + * * For `projects.locations.workflowTemplates.instantiate`, the resource name + * of the template has the following format: + * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + * @param {number} [request.version] + * Optional. The version of workflow template to delete. If specified, + * will only delete the template if the current server version matches + * specified version. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/workflow_template_service.delete_workflow_template.js + * region_tag:dataproc_v1_generated_WorkflowTemplateService_DeleteWorkflowTemplate_async + */ + deleteWorkflowTemplate( + request?: protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|undefined, {}|undefined + ]>; + deleteWorkflowTemplate( + request: protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): void; + deleteWorkflowTemplate( + request: protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): void; + deleteWorkflowTemplate( + request?: protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + 
options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'name': request.name ?? '', + }); + this.initialize(); + return this.innerApiCalls.deleteWorkflowTemplate(request, options, callback); + } + +/** + * Instantiates a template and begins execution. + * + * The returned Operation can be used to track execution of + * workflow by polling + * {@link google.longrunning.Operations.GetOperation|operations.get}. + * The Operation will complete when entire workflow is finished. + * + * The running workflow can be aborted via + * {@link google.longrunning.Operations.CancelOperation|operations.cancel}. + * This will cause any inflight jobs to be cancelled and workflow-owned + * clusters to be deleted. + * + * The {@link google.longrunning.Operation.metadata|Operation.metadata} will be + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * Also see [Using + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * + * On successful completion, + * {@link google.longrunning.Operation.response|Operation.response} will be + * {@link google.protobuf.Empty|Empty}. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The resource name of the workflow template, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.workflowTemplates.instantiate`, the resource name + * of the template has the following format: + * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + * + * * For `projects.locations.workflowTemplates.instantiate`, the resource name + * of the template has the following format: + * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` + * @param {number} [request.version] + * Optional. The version of workflow template to instantiate. 
If specified, + * the workflow will be instantiated only if the current version of + * the workflow template has the supplied version. + * + * This option cannot be used to instantiate a previous version of + * workflow template. + * @param {string} [request.requestId] + * Optional. A tag that prevents multiple concurrent workflow + * instances with the same tag from running. This mitigates risk of + * concurrent instances started due to retries. + * + * It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The tag must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {number[]} [request.parameters] + * Optional. Map from parameter names to values that should be used for those + * parameters. Values may not exceed 1000 characters. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/workflow_template_service.instantiate_workflow_template.js
+ * region_tag:dataproc_v1_generated_WorkflowTemplateService_InstantiateWorkflowTemplate_async
+ */
+  instantiateWorkflowTemplate(
+      request?: protos.google.cloud.dataproc.v1.IInstantiateWorkflowTemplateRequest,
+      options?: CallOptions):
+      Promise<[
+        LROperation<protos.google.protobuf.IEmpty, protos.google.cloud.dataproc.v1.IWorkflowMetadata>,
+        protos.google.longrunning.IOperation|undefined, {}|undefined
+      ]>;
+  instantiateWorkflowTemplate(
+      request: protos.google.cloud.dataproc.v1.IInstantiateWorkflowTemplateRequest,
+      options: CallOptions,
+      callback: Callback<
+          LROperation<protos.google.protobuf.IEmpty, protos.google.cloud.dataproc.v1.IWorkflowMetadata>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>): void;
+  instantiateWorkflowTemplate(
+      request: protos.google.cloud.dataproc.v1.IInstantiateWorkflowTemplateRequest,
+      callback: Callback<
+          LROperation<protos.google.protobuf.IEmpty, protos.google.cloud.dataproc.v1.IWorkflowMetadata>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>): void;
+  instantiateWorkflowTemplate(
+      request?: protos.google.cloud.dataproc.v1.IInstantiateWorkflowTemplateRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          LROperation<protos.google.protobuf.IEmpty, protos.google.cloud.dataproc.v1.IWorkflowMetadata>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          LROperation<protos.google.protobuf.IEmpty, protos.google.cloud.dataproc.v1.IWorkflowMetadata>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        LROperation<protos.google.protobuf.IEmpty, protos.google.cloud.dataproc.v1.IWorkflowMetadata>,
+        protos.google.longrunning.IOperation|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'name': request.name ??
'', + }); + this.initialize(); + return this.innerApiCalls.instantiateWorkflowTemplate(request, options, callback); + } +/** + * Check the status of the long running operation returned by `instantiateWorkflowTemplate()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/workflow_template_service.instantiate_workflow_template.js + * region_tag:dataproc_v1_generated_WorkflowTemplateService_InstantiateWorkflowTemplate_async + */ + async checkInstantiateWorkflowTemplateProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.instantiateWorkflowTemplate, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } +/** + * Instantiates a template and begins execution. + * + * This method is equivalent to executing the sequence + * {@link google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate|CreateWorkflowTemplate}, {@link google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate|InstantiateWorkflowTemplate}, + * {@link google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate|DeleteWorkflowTemplate}. + * + * The returned Operation can be used to track execution of + * workflow by polling + * {@link google.longrunning.Operations.GetOperation|operations.get}. + * The Operation will complete when entire workflow is finished. 
+ * + * The running workflow can be aborted via + * {@link google.longrunning.Operations.CancelOperation|operations.cancel}. + * This will cause any inflight jobs to be cancelled and workflow-owned + * clusters to be deleted. + * + * The {@link google.longrunning.Operation.metadata|Operation.metadata} will be + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * Also see [Using + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * + * On successful completion, + * {@link google.longrunning.Operation.response|Operation.response} will be + * {@link google.protobuf.Empty|Empty}. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The resource name of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.workflowTemplates,instantiateinline`, the resource + * name of the region has the following format: + * `projects/{project_id}/regions/{region}` + * + * * For `projects.locations.workflowTemplates.instantiateinline`, the + * resource name of the location has the following format: + * `projects/{project_id}/locations/{location}` + * @param {google.cloud.dataproc.v1.WorkflowTemplate} request.template + * Required. The workflow template to instantiate. + * @param {string} [request.requestId] + * Optional. A tag that prevents multiple concurrent workflow + * instances with the same tag from running. This mitigates risk of + * concurrent instances started due to retries. + * + * It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The tag must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. 
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js + * region_tag:dataproc_v1_generated_WorkflowTemplateService_InstantiateInlineWorkflowTemplate_async + */ + instantiateInlineWorkflowTemplate( + request?: protos.google.cloud.dataproc.v1.IInstantiateInlineWorkflowTemplateRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + instantiateInlineWorkflowTemplate( + request: protos.google.cloud.dataproc.v1.IInstantiateInlineWorkflowTemplateRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + instantiateInlineWorkflowTemplate( + request: protos.google.cloud.dataproc.v1.IInstantiateInlineWorkflowTemplateRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + instantiateInlineWorkflowTemplate( + request?: protos.google.cloud.dataproc.v1.IInstantiateInlineWorkflowTemplateRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let 
options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + this.initialize(); + return this.innerApiCalls.instantiateInlineWorkflowTemplate(request, options, callback); + } +/** + * Check the status of the long running operation returned by `instantiateInlineWorkflowTemplate()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js + * region_tag:dataproc_v1_generated_WorkflowTemplateService_InstantiateInlineWorkflowTemplate_async + */ + async checkInstantiateInlineWorkflowTemplateProgress(name: string): Promise>{ + const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.instantiateInlineWorkflowTemplate, this._gaxModule.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } + /** + * Lists workflows that match the specified filter in the request. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. 
The resource name of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.workflowTemplates,list`, the resource + * name of the region has the following format: + * `projects/{project_id}/regions/{region}` + * + * * For `projects.locations.workflowTemplates.list`, the + * resource name of the location has the following format: + * `projects/{project_id}/locations/{location}` + * @param {number} [request.pageSize] + * Optional. The maximum number of results to return in each response. + * @param {string} [request.pageToken] + * Optional. The page token, returned by a previous call, to request the + * next page of results. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate}. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. + * We recommend using `listWorkflowTemplatesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. 
+ */ + listWorkflowTemplates( + request?: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.dataproc.v1.IWorkflowTemplate[], + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest|null, + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse + ]>; + listWorkflowTemplates( + request: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, + options: CallOptions, + callback: PaginationCallback< + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IWorkflowTemplate>): void; + listWorkflowTemplates( + request: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, + callback: PaginationCallback< + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IWorkflowTemplate>): void; + listWorkflowTemplates( + request?: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, + optionsOrCallback?: CallOptions|PaginationCallback< + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IWorkflowTemplate>, + callback?: PaginationCallback< + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse|null|undefined, + protos.google.cloud.dataproc.v1.IWorkflowTemplate>): + Promise<[ + protos.google.cloud.dataproc.v1.IWorkflowTemplate[], + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest|null, + protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = 
optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + this.initialize(); + return this.innerApiCalls.listWorkflowTemplates(request, options, callback); + } + +/** + * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The resource name of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.workflowTemplates,list`, the resource + * name of the region has the following format: + * `projects/{project_id}/regions/{region}` + * + * * For `projects.locations.workflowTemplates.list`, the + * resource name of the location has the following format: + * `projects/{project_id}/locations/{location}` + * @param {number} [request.pageSize] + * Optional. The maximum number of results to return in each response. + * @param {string} [request.pageToken] + * Optional. The page token, returned by a previous call, to request the + * next page of results. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits an object representing [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate} on 'data' event. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed. Note that it can affect your quota. 
+ * We recommend using `listWorkflowTemplatesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. + */ + listWorkflowTemplatesStream( + request?: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, + options?: CallOptions): + Transform{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + const defaultCallSettings = this._defaults['listWorkflowTemplates']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listWorkflowTemplates.createStream( + this.innerApiCalls.listWorkflowTemplates as GaxCall, + request, + callSettings + ); + } + +/** + * Equivalent to `listWorkflowTemplates`, but returns an iterable object. + * + * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The resource name of the region or location, as described + * in https://cloud.google.com/apis/design/resource_names. + * + * * For `projects.regions.workflowTemplates,list`, the resource + * name of the region has the following format: + * `projects/{project_id}/regions/{region}` + * + * * For `projects.locations.workflowTemplates.list`, the + * resource name of the location has the following format: + * `projects/{project_id}/locations/{location}` + * @param {number} [request.pageSize] + * Optional. The maximum number of results to return in each response. + * @param {string} [request.pageToken] + * Optional. 
The page token, returned by a previous call, to request the + * next page of results. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Object} + * An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols). + * When you iterate the returned iterable, each element will be an object representing + * [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate}. The API will be called under the hood as needed, once per the page, + * so you can stop the iteration when you don't need more results. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) + * for more details and examples. + * @example include:samples/generated/v1/workflow_template_service.list_workflow_templates.js + * region_tag:dataproc_v1_generated_WorkflowTemplateService_ListWorkflowTemplates_async + */ + listWorkflowTemplatesAsync( + request?: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, + options?: CallOptions): + AsyncIterable{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'parent': request.parent ?? '', + }); + const defaultCallSettings = this._defaults['listWorkflowTemplates']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listWorkflowTemplates.asyncIterate( + this.innerApiCalls['listWorkflowTemplates'] as GaxCall, + request as {}, + callSettings + ) as AsyncIterable; + } + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified batch resource name string. 
+ * + * @param {string} project + * @param {string} location + * @param {string} batch + * @returns {string} Resource name string. + */ + batchPath(project:string,location:string,batch:string) { + return this.pathTemplates.batchPathTemplate.render({ + project: project, + location: location, + batch: batch, + }); + } + + /** + * Parse the project from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the project. + */ + matchProjectFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).project; + } + + /** + * Parse the location from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the location. + */ + matchLocationFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).location; + } + + /** + * Parse the batch from Batch resource. + * + * @param {string} batchName + * A fully-qualified path representing Batch resource. + * @returns {string} A string representing the batch. + */ + matchBatchFromBatchName(batchName: string) { + return this.pathTemplates.batchPathTemplate.match(batchName).batch; + } + + /** + * Return a fully-qualified nodeGroup resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. + */ + nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. 
+ */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; + } + + /** + * Parse the region from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the cluster. + */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; + } + + /** + * Return a fully-qualified project resource name string. + * + * @param {string} project + * @returns {string} Resource name string. + */ + projectPath(project:string) { + return this.pathTemplates.projectPathTemplate.render({ + project: project, + }); + } + + /** + * Parse the project from Project resource. + * + * @param {string} projectName + * A fully-qualified path representing Project resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectName(projectName: string) { + return this.pathTemplates.projectPathTemplate.match(projectName).project; + } + + /** + * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. 
+ * + * @param {string} project + * @param {string} location + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ + project: project, + location: location, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; + } + + /** + * Parse the location from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the location. + */ + matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; + } + + /** + * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. + * + * @param {string} projectLocationAutoscalingPolicyName + * A fully-qualified path representing project_location_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. 
+ */ + matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectLocationWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ + project: project, + location: location, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; + } + + /** + * Parse the location from ProjectLocationWorkflowTemplate resource. + * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the location. + */ + matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; + } + + /** + * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. 
+ * + * @param {string} projectLocationWorkflowTemplateName + * A fully-qualified path representing project_location_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; + } + + /** + * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} autoscaling_policy + * @returns {string} Resource name string. + */ + projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ + project: project, + region: region, + autoscaling_policy: autoscalingPolicy, + }); + } + + /** + * Parse the project from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; + } + + /** + * Parse the region from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the region. 
+ */ + matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; + } + + /** + * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. + * + * @param {string} projectRegionAutoscalingPolicyName + * A fully-qualified path representing project_region_autoscaling_policy resource. + * @returns {string} A string representing the autoscaling_policy. + */ + matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; + } + + /** + * Return a fully-qualified projectRegionWorkflowTemplate resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} workflow_template + * @returns {string} Resource name string. + */ + projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ + project: project, + region: region, + workflow_template: workflowTemplate, + }); + } + + /** + * Parse the project from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; + } + + /** + * Parse the region from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. 
+ * @returns {string} A string representing the region. + */ + matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; + } + + /** + * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. + * + * @param {string} projectRegionWorkflowTemplateName + * A fully-qualified path representing project_region_workflow_template resource. + * @returns {string} A string representing the workflow_template. + */ + matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; + } + + /** + * Return a fully-qualified region resource name string. + * + * @param {string} project + * @param {string} region + * @returns {string} Resource name string. + */ + regionPath(project:string,region:string) { + return this.pathTemplates.regionPathTemplate.render({ + project: project, + region: region, + }); + } + + /** + * Parse the project from Region resource. + * + * @param {string} regionName + * A fully-qualified path representing Region resource. + * @returns {string} A string representing the project. + */ + matchProjectFromRegionName(regionName: string) { + return this.pathTemplates.regionPathTemplate.match(regionName).project; + } + + /** + * Parse the region from Region resource. + * + * @param {string} regionName + * A fully-qualified path representing Region resource. + * @returns {string} A string representing the region. + */ + matchRegionFromRegionName(regionName: string) { + return this.pathTemplates.regionPathTemplate.match(regionName).region; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. 
+ * @returns {Promise} A promise that resolves when the client is closed. + */ + close(): Promise { + if (this.workflowTemplateServiceStub && !this._terminated) { + return this.workflowTemplateServiceStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client_config.json new file mode 100644 index 00000000000..62d3aa9b9a0 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client_config.json @@ -0,0 +1,69 @@ +{ + "interfaces": { + "google.cloud.dataproc.v1.WorkflowTemplateService": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ], + "unavailable": [ + "UNAVAILABLE" + ], + "deadline_exceeded_internal_unavailable": [ + "DEADLINE_EXCEEDED", + "INTERNAL", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "CreateWorkflowTemplate": { + "timeout_millis": 600000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "GetWorkflowTemplate": { + "timeout_millis": 600000, + "retry_codes_name": "deadline_exceeded_internal_unavailable", + "retry_params_name": "default" + }, + "InstantiateWorkflowTemplate": { + "timeout_millis": 600000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "InstantiateInlineWorkflowTemplate": { + "timeout_millis": 600000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "UpdateWorkflowTemplate": { + "timeout_millis": 600000, + 
"retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "ListWorkflowTemplates": { + "timeout_millis": 600000, + "retry_codes_name": "deadline_exceeded_internal_unavailable", + "retry_params_name": "default" + }, + "DeleteWorkflowTemplate": { + "timeout_millis": 600000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_proto_list.json new file mode 100644 index 00000000000..3bb7ccf055a --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_proto_list.json @@ -0,0 +1,10 @@ +[ + "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", + "../../protos/google/cloud/dataproc/v1/batches.proto", + "../../protos/google/cloud/dataproc/v1/clusters.proto", + "../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", + "../../protos/google/cloud/dataproc/v1/operations.proto", + "../../protos/google/cloud/dataproc/v1/shared.proto", + "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" +] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.js new file mode 100644 index 00000000000..0b94f82406b --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.js @@ -0,0 +1,32 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const dataproc = require('@google-cloud/dataproc'); + +function main() { + const autoscalingPolicyServiceClient = new dataproc.AutoscalingPolicyServiceClient(); + const batchControllerClient = new dataproc.BatchControllerClient(); + const clusterControllerClient = new dataproc.ClusterControllerClient(); + const jobControllerClient = new dataproc.JobControllerClient(); + const nodeGroupControllerClient = new dataproc.NodeGroupControllerClient(); + const workflowTemplateServiceClient = new dataproc.WorkflowTemplateServiceClient(); +} + +main(); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.ts new file mode 100644 index 00000000000..fcc5e31dcea --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.ts @@ -0,0 +1,62 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {AutoscalingPolicyServiceClient, BatchControllerClient, ClusterControllerClient, JobControllerClient, NodeGroupControllerClient, WorkflowTemplateServiceClient} from '@google-cloud/dataproc'; + +// check that the client class type name can be used +function doStuffWithAutoscalingPolicyServiceClient(client: AutoscalingPolicyServiceClient) { + client.close(); +} +function doStuffWithBatchControllerClient(client: BatchControllerClient) { + client.close(); +} +function doStuffWithClusterControllerClient(client: ClusterControllerClient) { + client.close(); +} +function doStuffWithJobControllerClient(client: JobControllerClient) { + client.close(); +} +function doStuffWithNodeGroupControllerClient(client: NodeGroupControllerClient) { + client.close(); +} +function doStuffWithWorkflowTemplateServiceClient(client: WorkflowTemplateServiceClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const autoscalingPolicyServiceClient = new AutoscalingPolicyServiceClient(); + doStuffWithAutoscalingPolicyServiceClient(autoscalingPolicyServiceClient); + // check that the client instance can be created + const batchControllerClient = new BatchControllerClient(); + doStuffWithBatchControllerClient(batchControllerClient); + // check that the client instance can be created + const clusterControllerClient = new 
ClusterControllerClient(); + doStuffWithClusterControllerClient(clusterControllerClient); + // check that the client instance can be created + const jobControllerClient = new JobControllerClient(); + doStuffWithJobControllerClient(jobControllerClient); + // check that the client instance can be created + const nodeGroupControllerClient = new NodeGroupControllerClient(); + doStuffWithNodeGroupControllerClient(nodeGroupControllerClient); + // check that the client instance can be created + const workflowTemplateServiceClient = new WorkflowTemplateServiceClient(); + doStuffWithWorkflowTemplateServiceClient(workflowTemplateServiceClient); +} + +main(); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/system-test/install.ts b/owl-bot-staging/google-cloud-dataproc/v1/system-test/install.ts new file mode 100644 index 00000000000..557a57558e1 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/system-test/install.ts @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import {packNTest} from 'pack-n-play'; +import {readFileSync} from 'fs'; +import {describe, it} from 'mocha'; + +describe('📦 pack-n-play test', () => { + + it('TypeScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() + } + }; + await packNTest(options); + }); + + it('JavaScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'JavaScript user can use the library', + ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() + } + }; + await packNTest(options); + }); + +}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_autoscaling_policy_service_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_autoscaling_policy_service_v1.ts new file mode 100644 index 00000000000..ff961ac6ff2 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_autoscaling_policy_service_v1.ts @@ -0,0 +1,1233 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as autoscalingpolicyserviceModule from '../src'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? 
sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v1.AutoscalingPolicyServiceClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', 
async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.autoscalingPolicyServiceStub, undefined); + await client.initialize(); + assert(client.autoscalingPolicyServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.autoscalingPolicyServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.autoscalingPolicyServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + 
client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('createAutoscalingPolicy', () => { + it('invokes createAutoscalingPolicy without error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.AutoscalingPolicy() + ); + client.innerApiCalls.createAutoscalingPolicy = stubSimpleCall(expectedResponse); + const [response] = await client.createAutoscalingPolicy(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createAutoscalingPolicy without error using callback', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new 
protos.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.AutoscalingPolicy() + ); + client.innerApiCalls.createAutoscalingPolicy = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.createAutoscalingPolicy( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IAutoscalingPolicy|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createAutoscalingPolicy with error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.createAutoscalingPolicy = stubSimpleCall(undefined, expectedError); + await 
assert.rejects(client.createAutoscalingPolicy(request), expectedError); + const actualRequest = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createAutoscalingPolicy with closed client', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest', ['parent']); + request.parent = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.createAutoscalingPolicy(request), expectedError); + }); + }); + + describe('updateAutoscalingPolicy', () => { + it('invokes updateAutoscalingPolicy without error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest() + ); + request.policy ??= {}; + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest', ['policy', 'name']); + request.policy.name = defaultValue1; + const expectedHeaderRequestParams = `policy.name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new 
protos.google.cloud.dataproc.v1.AutoscalingPolicy() + ); + client.innerApiCalls.updateAutoscalingPolicy = stubSimpleCall(expectedResponse); + const [response] = await client.updateAutoscalingPolicy(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateAutoscalingPolicy without error using callback', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest() + ); + request.policy ??= {}; + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest', ['policy', 'name']); + request.policy.name = defaultValue1; + const expectedHeaderRequestParams = `policy.name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.AutoscalingPolicy() + ); + client.innerApiCalls.updateAutoscalingPolicy = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.updateAutoscalingPolicy( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IAutoscalingPolicy|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + 
assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateAutoscalingPolicy with error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest() + ); + request.policy ??= {}; + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest', ['policy', 'name']); + request.policy.name = defaultValue1; + const expectedHeaderRequestParams = `policy.name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.updateAutoscalingPolicy = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.updateAutoscalingPolicy(request), expectedError); + const actualRequest = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateAutoscalingPolicy with closed client', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest() + ); + request.policy ??= {}; + const defaultValue1 = + 
getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest', ['policy', 'name']); + request.policy.name = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.updateAutoscalingPolicy(request), expectedError); + }); + }); + + describe('getAutoscalingPolicy', () => { + it('invokes getAutoscalingPolicy without error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.AutoscalingPolicy() + ); + client.innerApiCalls.getAutoscalingPolicy = stubSimpleCall(expectedResponse); + const [response] = await client.getAutoscalingPolicy(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getAutoscalingPolicy without error using callback', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new 
protos.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.AutoscalingPolicy() + ); + client.innerApiCalls.getAutoscalingPolicy = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getAutoscalingPolicy( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IAutoscalingPolicy|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getAutoscalingPolicy with error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.getAutoscalingPolicy = stubSimpleCall(undefined, expectedError); + await 
assert.rejects(client.getAutoscalingPolicy(request), expectedError); + const actualRequest = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getAutoscalingPolicy with closed client', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest', ['name']); + request.name = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getAutoscalingPolicy(request), expectedError); + }); + }); + + describe('deleteAutoscalingPolicy', () => { + it('invokes deleteAutoscalingPolicy without error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteAutoscalingPolicy = stubSimpleCall(expectedResponse); + 
const [response] = await client.deleteAutoscalingPolicy(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteAutoscalingPolicy without error using callback', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteAutoscalingPolicy = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteAutoscalingPolicy( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteAutoscalingPolicy with error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteAutoscalingPolicy = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteAutoscalingPolicy(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteAutoscalingPolicy with closed client', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest', ['name']); + request.name = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteAutoscalingPolicy(request), 
expectedError); + }); + }); + + describe('listAutoscalingPolicies', () => { + it('invokes listAutoscalingPolicies without error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + ]; + client.innerApiCalls.listAutoscalingPolicies = stubSimpleCall(expectedResponse); + const [response] = await client.listAutoscalingPolicies(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listAutoscalingPolicies without error using callback', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() + ); + const defaultValue1 = + 
getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + ]; + client.innerApiCalls.listAutoscalingPolicies = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listAutoscalingPolicies( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IAutoscalingPolicy[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listAutoscalingPolicies with error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + 
client.innerApiCalls.listAutoscalingPolicies = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listAutoscalingPolicies(request), expectedError); + const actualRequest = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listAutoscalingPoliciesStream without error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + ]; + client.descriptors.page.listAutoscalingPolicies.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listAutoscalingPoliciesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.dataproc.v1.AutoscalingPolicy[] = []; + stream.on('data', (response: protos.google.cloud.dataproc.v1.AutoscalingPolicy) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + 
}); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listAutoscalingPolicies.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listAutoscalingPolicies, request)); + assert( + (client.descriptors.page.listAutoscalingPolicies.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listAutoscalingPoliciesStream with error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.descriptors.page.listAutoscalingPolicies.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listAutoscalingPoliciesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.dataproc.v1.AutoscalingPolicy[] = []; + stream.on('data', (response: protos.google.cloud.dataproc.v1.AutoscalingPolicy) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listAutoscalingPolicies.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listAutoscalingPolicies, request)); + assert( + (client.descriptors.page.listAutoscalingPolicies.createStream as SinonStub) + 
.getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listAutoscalingPolicies without error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), + ]; + client.descriptors.page.listAutoscalingPolicies.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.dataproc.v1.IAutoscalingPolicy[] = []; + const iterable = client.listAutoscalingPoliciesAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listAutoscalingPolicies.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listAutoscalingPolicies.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listAutoscalingPolicies with error', async () => { + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 
'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.descriptors.page.listAutoscalingPolicies.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listAutoscalingPoliciesAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.dataproc.v1.IAutoscalingPolicy[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listAutoscalingPolicies.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listAutoscalingPolicies.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); + + describe('Path templates', () => { + + describe('batch', () => { + const fakePath = "/rendered/path/batch"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + batch: "batchValue", + }; + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.batchPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.batchPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('batchPath', () => { + const result = client.batchPath("projectValue", "locationValue", "batchValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.batchPathTemplate.render as SinonStub) + 
.getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromBatchName', () => { + const result = client.matchProjectFromBatchName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromBatchName', () => { + const result = client.matchLocationFromBatchName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchBatchFromBatchName', () => { + const result = client.matchBatchFromBatchName(fakePath); + assert.strictEqual(result, "batchValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('location', () => { + const fakePath = "/rendered/path/location"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + }; + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.locationPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.locationPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('locationPath', () => { + const result = client.locationPath("projectValue", "locationValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.locationPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromLocationName', () => { + const result = client.matchProjectFromLocationName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.locationPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromLocationName', () => { + const 
result = client.matchLocationFromLocationName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.locationPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('nodeGroup', () => { + const fakePath = "/rendered/path/nodeGroup"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + cluster: "clusterValue", + node_group: "nodeGroupValue", + }; + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, "clusterValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + 
it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, "nodeGroupValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('project', () => { + const fakePath = "/rendered/path/project"; + const expectedParameters = { + project: "projectValue", + }; + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectPath', () => { + const result = client.projectPath("projectValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectName', () => { + const result = client.matchProjectFromProjectName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = + 
sinon.stub().returns(expectedParameters); + + it('projectLocationAutoscalingPolicyPath', () => { + const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + workflow_template: "workflowTemplateValue", + }; + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + 
client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationWorkflowTemplatePath', () => { + const result = client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ 
+ credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionAutoscalingPolicyPath', () => { + const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + workflow_template: "workflowTemplateValue", 
+ }; + const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionWorkflowTemplatePath', () => { + const result = client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_batch_controller_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_batch_controller_v1.ts 
new file mode 100644 index 00000000000..f30e15246ba --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_batch_controller_v1.ts @@ -0,0 +1,1183 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as batchcontrollerModule from '../src'; + +import {PassThrough} from 'stream'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof 
protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? 
sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v1.BatchControllerClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = batchcontrollerModule.v1.BatchControllerClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = batchcontrollerModule.v1.BatchControllerClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = batchcontrollerModule.v1.BatchControllerClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new batchcontrollerModule.v1.BatchControllerClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: 
{client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.batchControllerStub, undefined); + await client.initialize(); + assert(client.batchControllerStub); + }); + + it('has close method for the initialized client', done => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.batchControllerStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.batchControllerStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + 
}); + }); + + describe('getBatch', () => { + it('invokes getBatch without error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetBatchRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Batch() + ); + client.innerApiCalls.getBatch = stubSimpleCall(expectedResponse); + const [response] = await client.getBatch(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getBatch as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getBatch as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getBatch without error using callback', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetBatchRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Batch() + ); + client.innerApiCalls.getBatch = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, 
reject) => { + client.getBatch( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IBatch|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getBatch as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getBatch as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getBatch with error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetBatchRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.getBatch = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getBatch(request), expectedError); + const actualRequest = (client.innerApiCalls.getBatch as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getBatch as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getBatch with closed client', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const 
request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetBatchRequest', ['name']); + request.name = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getBatch(request), expectedError); + }); + }); + + describe('deleteBatch', () => { + it('invokes deleteBatch without error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteBatchRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteBatch = stubSimpleCall(expectedResponse); + const [response] = await client.deleteBatch(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteBatch as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteBatch as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteBatch without error using callback', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteBatchRequest() + ); + const 
defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteBatchRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteBatch = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteBatch( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteBatch as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteBatch as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteBatch with error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteBatchRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteBatch = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteBatch(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteBatch as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = 
(client.innerApiCalls.deleteBatch as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteBatch with closed client', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteBatchRequest', ['name']); + request.name = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteBatch(request), expectedError); + }); + }); + + describe('createBatch', () => { + it('invokes createBatch without error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateBatchRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.createBatch = stubLongRunningCall(expectedResponse); + const [operation] = await client.createBatch(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.createBatch as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createBatch as 
SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createBatch without error using callback', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateBatchRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.createBatch = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.createBatch( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.createBatch as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createBatch as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createBatch with call error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new 
protos.google.cloud.dataproc.v1.CreateBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateBatchRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.createBatch = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.createBatch(request), expectedError); + const actualRequest = (client.innerApiCalls.createBatch as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createBatch as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createBatch with LRO error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateBatchRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateBatchRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.createBatch = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.createBatch(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.createBatch as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createBatch as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkCreateBatchProgress without error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkCreateBatchProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkCreateBatchProgress with error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkCreateBatchProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('listBatches', () => { + it('invokes listBatches without error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListBatchesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', 
['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + ]; + client.innerApiCalls.listBatches = stubSimpleCall(expectedResponse); + const [response] = await client.listBatches(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listBatches as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listBatches as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listBatches without error using callback', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListBatchesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + ]; + client.innerApiCalls.listBatches = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listBatches( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IBatch[]|null) => { + if (err) { + reject(err); + 
} else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listBatches as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listBatches as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listBatches with error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListBatchesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.listBatches = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listBatches(request), expectedError); + const actualRequest = (client.innerApiCalls.listBatches as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listBatches as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listBatchesStream without error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListBatchesRequest() + ); + const 
defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + ]; + client.descriptors.page.listBatches.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listBatchesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.dataproc.v1.Batch[] = []; + stream.on('data', (response: protos.google.cloud.dataproc.v1.Batch) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listBatches.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listBatches, request)); + assert( + (client.descriptors.page.listBatches.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listBatchesStream with error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListBatchesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + 
client.descriptors.page.listBatches.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listBatchesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.dataproc.v1.Batch[] = []; + stream.on('data', (response: protos.google.cloud.dataproc.v1.Batch) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listBatches.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listBatches, request)); + assert( + (client.descriptors.page.listBatches.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listBatches without error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListBatchesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), + ]; + client.descriptors.page.listBatches.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.dataproc.v1.IBatch[] = []; + const iterable = client.listBatchesAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + 
assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listBatches.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listBatches.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listBatches with error', async () => { + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListBatchesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.descriptors.page.listBatches.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listBatchesAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.dataproc.v1.IBatch[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listBatches.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listBatches.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); + + describe('Path templates', () => { + + describe('batch', () => { + const fakePath = "/rendered/path/batch"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + batch: "batchValue", + }; + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', 
private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.batchPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.batchPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('batchPath', () => { + const result = client.batchPath("projectValue", "locationValue", "batchValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.batchPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromBatchName', () => { + const result = client.matchProjectFromBatchName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromBatchName', () => { + const result = client.matchLocationFromBatchName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchBatchFromBatchName', () => { + const result = client.matchBatchFromBatchName(fakePath); + assert.strictEqual(result, "batchValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('location', () => { + const fakePath = "/rendered/path/location"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + }; + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.locationPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.locationPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('locationPath', () => { + const result = client.locationPath("projectValue", "locationValue"); + assert.strictEqual(result, fakePath); 
+ assert((client.pathTemplates.locationPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromLocationName', () => { + const result = client.matchProjectFromLocationName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.locationPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromLocationName', () => { + const result = client.matchLocationFromLocationName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.locationPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('nodeGroup', () => { + const fakePath = "/rendered/path/nodeGroup"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + cluster: "clusterValue", + node_group: "nodeGroupValue", + }; + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, 
"regionValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, "clusterValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, "nodeGroupValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('project', () => { + const fakePath = "/rendered/path/project"; + const expectedParameters = { + project: "projectValue", + }; + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectPath', () => { + const result = client.projectPath("projectValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectName', () => { + const result = client.matchProjectFromProjectName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + 
const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationAutoscalingPolicyPath', () => { + const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; + const 
expectedParameters = { + project: "projectValue", + location: "locationValue", + workflow_template: "workflowTemplateValue", + }; + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationWorkflowTemplatePath', () => { + const result = client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + 
describe('projectRegionAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionAutoscalingPolicyPath', () => { + const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + 
assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + workflow_template: "workflowTemplateValue", + }; + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionWorkflowTemplatePath', () => { + const result = client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); 
+ assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_cluster_controller_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_cluster_controller_v1.ts new file mode 100644 index 00000000000..01f54ef9787 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_cluster_controller_v1.ts @@ -0,0 +1,2000 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as clustercontrollerModule from '../src'; + +import {PassThrough} from 'stream'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? 
sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v1.ClusterControllerClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = clustercontrollerModule.v1.ClusterControllerClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = 
clustercontrollerModule.v1.ClusterControllerClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = clustercontrollerModule.v1.ClusterControllerClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.clusterControllerStub, undefined); + await client.initialize(); + assert(client.clusterControllerStub); + }); + + it('has close method for the initialized client', done => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.clusterControllerStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.clusterControllerStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result 
= await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getCluster', () => { + it('invokes getCluster without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Cluster() + ); + client.innerApiCalls.getCluster = stubSimpleCall(expectedResponse); + const [response] = await client.getCluster(request); + assert.deepStrictEqual(response, 
expectedResponse); + const actualRequest = (client.innerApiCalls.getCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getCluster without error using callback', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Cluster() + ); + client.innerApiCalls.getCluster = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getCluster( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.ICluster|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const 
actualHeaderRequestParams = (client.innerApiCalls.getCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getCluster with error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.getCluster = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getCluster(request), expectedError); + const actualRequest = (client.innerApiCalls.getCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getCluster with closed client', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new 
protos.google.cloud.dataproc.v1.GetClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getCluster(request), expectedError); + }); + }); + + describe('createCluster', () => { + it('invokes createCluster without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.createCluster = stubLongRunningCall(expectedResponse); + const [operation] = await client.createCluster(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.createCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createCluster as 
SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createCluster without error using callback', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.createCluster = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.createCluster( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.createCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createCluster with call error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + 
credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedError = new Error('expected'); + client.innerApiCalls.createCluster = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.createCluster(request), expectedError); + const actualRequest = (client.innerApiCalls.createCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createCluster with LRO error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedError = new Error('expected'); + 
client.innerApiCalls.createCluster = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.createCluster(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.createCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkCreateClusterProgress without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkCreateClusterProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkCreateClusterProgress with error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkCreateClusterProgress(''), expectedError); + 
assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('updateCluster', () => { + it('invokes updateCluster without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.updateCluster = stubLongRunningCall(expectedResponse); + const [operation] = await client.updateCluster(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateCluster without error using callback', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); 
+            client.initialize();
+            const request = generateSampleMessage(
+              new protos.google.cloud.dataproc.v1.UpdateClusterRequest()
+            );
+            const defaultValue1 =
+              getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['projectId']);
+            request.projectId = defaultValue1;
+            const defaultValue2 =
+              getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['region']);
+            request.region = defaultValue2;
+            const defaultValue3 =
+              getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['clusterName']);
+            request.clusterName = defaultValue3;
+            // NOTE(review): '&region=' below was corrupted to '®ion=' by HTML-entity
+            // mangling ('&reg' -> '®') in the patch; restored throughout this section.
+            const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}&cluster_name=${defaultValue3}`;
+            const expectedResponse = generateSampleMessage(
+              new protos.google.longrunning.Operation()
+            );
+            client.innerApiCalls.updateCluster = stubLongRunningCallWithCallback(expectedResponse);
+            const promise = new Promise((resolve, reject) => {
+                client.updateCluster(
+                    request,
+                    (err?: Error|null,
+                     result?: LROperation<protos.google.cloud.dataproc.v1.ICluster, protos.google.cloud.dataproc.v1.IClusterOperationMetadata>|null
+                    ) => {
+                        if (err) {
+                            reject(err);
+                        } else {
+                            resolve(result);
+                        }
+                    });
+            });
+            const operation = await promise as LROperation<protos.google.cloud.dataproc.v1.ICluster, protos.google.cloud.dataproc.v1.IClusterOperationMetadata>;
+            const [response] = await operation.promise();
+            assert.deepStrictEqual(response, expectedResponse);
+            const actualRequest = (client.innerApiCalls.updateCluster as SinonStub)
+                .getCall(0).args[0];
+            assert.deepStrictEqual(actualRequest, request);
+            const actualHeaderRequestParams = (client.innerApiCalls.updateCluster as SinonStub)
+                .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+            assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+        });
+
+        it('invokes updateCluster with call error', async () => {
+            const client = new clustercontrollerModule.v1.ClusterControllerClient({
+              credentials: {client_email: 'bogus', private_key: 'bogus'},
+              projectId: 'bogus',
+            });
+            client.initialize();
+            const request = generateSampleMessage(
+              new protos.google.cloud.dataproc.v1.UpdateClusterRequest()
+            );
+            const defaultValue1 =
+              getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['projectId']);
+            request.projectId = defaultValue1;
+            const defaultValue2 =
+              getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['region']);
+            request.region = defaultValue2;
+            const defaultValue3 =
+              getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['clusterName']);
+            request.clusterName = defaultValue3;
+            const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}&cluster_name=${defaultValue3}`;
+            const expectedError = new Error('expected');
+            client.innerApiCalls.updateCluster = stubLongRunningCall(undefined, expectedError);
+            await assert.rejects(client.updateCluster(request), expectedError);
+            const actualRequest = (client.innerApiCalls.updateCluster as SinonStub)
+                .getCall(0).args[0];
+            assert.deepStrictEqual(actualRequest, request);
+            const actualHeaderRequestParams = (client.innerApiCalls.updateCluster as SinonStub)
+                .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+            assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+        });
+
+        it('invokes updateCluster with LRO error', async () => {
+            const client = new clustercontrollerModule.v1.ClusterControllerClient({
+              credentials: {client_email: 'bogus', private_key: 'bogus'},
+              projectId: 'bogus',
+            });
+            client.initialize();
+            const request = generateSampleMessage(
+              new protos.google.cloud.dataproc.v1.UpdateClusterRequest()
+            );
+            const defaultValue1 =
+              getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['projectId']);
+            request.projectId = defaultValue1;
+            const defaultValue2 =
+              getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['region']);
+            request.region = defaultValue2;
+            const defaultValue3 =
+              getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['clusterName']);
+            request.clusterName = defaultValue3;
+            const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}&cluster_name=${defaultValue3}`;
+            const expectedError = new Error('expected');
+            client.innerApiCalls.updateCluster = stubLongRunningCall(undefined, undefined, expectedError);
+            const [operation] = await client.updateCluster(request);
+            await assert.rejects(operation.promise(), expectedError);
+            const actualRequest = (client.innerApiCalls.updateCluster as SinonStub)
+                .getCall(0).args[0];
+            assert.deepStrictEqual(actualRequest, request);
+            const actualHeaderRequestParams = (client.innerApiCalls.updateCluster as SinonStub)
+                .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+            assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+        });
+
+        it('invokes checkUpdateClusterProgress without error', async () => {
+            const client = new clustercontrollerModule.v1.ClusterControllerClient({
+              credentials: {client_email: 'bogus', private_key: 'bogus'},
+              projectId: 'bogus',
+            });
+            client.initialize();
+            const expectedResponse = generateSampleMessage(
+              new operationsProtos.google.longrunning.Operation()
+            );
+            expectedResponse.name = 'test';
+            expectedResponse.response = {type_url: 'url', value: Buffer.from('')};
+            expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')}
+
+            client.operationsClient.getOperation = stubSimpleCall(expectedResponse);
+            const decodedOperation = await client.checkUpdateClusterProgress(expectedResponse.name);
+            assert.deepStrictEqual(decodedOperation.name, expectedResponse.name);
+            assert(decodedOperation.metadata);
+            assert((client.operationsClient.getOperation as SinonStub).getCall(0));
+        });
+
+        it('invokes checkUpdateClusterProgress with error', async () => {
+            const client = new clustercontrollerModule.v1.ClusterControllerClient({
+              credentials: {client_email: 'bogus', private_key: 'bogus'},
+              projectId: 'bogus',
+            });
+            client.initialize();
+            const expectedError = new Error('expected');
+
+            client.operationsClient.getOperation =
stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkUpdateClusterProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('stopCluster', () => { + it('invokes stopCluster without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.StopClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.stopCluster = stubLongRunningCall(expectedResponse); + const [operation] = await client.stopCluster(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.stopCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.stopCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes stopCluster without error using callback', async () => { + const client = new 
clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.StopClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.stopCluster = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.stopCluster( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.stopCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.stopCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes stopCluster with call error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + 
client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.StopClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.stopCluster = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.stopCluster(request), expectedError); + const actualRequest = (client.innerApiCalls.stopCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.stopCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes stopCluster with LRO error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.StopClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', 
['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.stopCluster = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.stopCluster(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.stopCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.stopCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkStopClusterProgress without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkStopClusterProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkStopClusterProgress with error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new 
Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkStopClusterProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('startCluster', () => { + it('invokes startCluster without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.StartClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.startCluster = stubLongRunningCall(expectedResponse); + const [operation] = await client.startCluster(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.startCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.startCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes startCluster without error using callback', async 
() => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.StartClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.startCluster = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.startCluster( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.startCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.startCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes startCluster with call error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + 
projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.StartClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.startCluster = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.startCluster(request), expectedError); + const actualRequest = (client.innerApiCalls.startCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.startCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes startCluster with LRO error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.StartClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + 
getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.startCluster = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.startCluster(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.startCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.startCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkStartClusterProgress without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkStartClusterProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkStartClusterProgress with error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + 
projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkStartClusterProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('deleteCluster', () => { + it('invokes deleteCluster without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.deleteCluster = stubLongRunningCall(expectedResponse); + const [operation] = await client.deleteCluster(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteCluster without error using callback', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.deleteCluster = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteCluster( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteCluster with call 
error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteCluster = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.deleteCluster(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteCluster with LRO error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + 
getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteCluster = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.deleteCluster(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.deleteCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkDeleteClusterProgress without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkDeleteClusterProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkDeleteClusterProgress with error', async 
() => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkDeleteClusterProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('diagnoseCluster', () => { + it('invokes diagnoseCluster without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DiagnoseClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.diagnoseCluster = stubLongRunningCall(expectedResponse); + const [operation] = await client.diagnoseCluster(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.diagnoseCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = 
(client.innerApiCalls.diagnoseCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes diagnoseCluster without error using callback', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DiagnoseClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.diagnoseCluster = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.diagnoseCluster( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.diagnoseCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.diagnoseCluster as SinonStub) + 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes diagnoseCluster with call error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DiagnoseClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.diagnoseCluster = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.diagnoseCluster(request), expectedError); + const actualRequest = (client.innerApiCalls.diagnoseCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.diagnoseCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes diagnoseCluster with LRO error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new 
protos.google.cloud.dataproc.v1.DiagnoseClusterRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['clusterName']); + request.clusterName = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.diagnoseCluster = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.diagnoseCluster(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.diagnoseCluster as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.diagnoseCluster as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkDiagnoseClusterProgress without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await 
client.checkDiagnoseClusterProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkDiagnoseClusterProgress with error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkDiagnoseClusterProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('listClusters', () => { + it('invokes listClusters without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListClustersRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + ]; + client.innerApiCalls.listClusters = stubSimpleCall(expectedResponse); + const [response] = await client.listClusters(request); + assert.deepStrictEqual(response, 
expectedResponse); + const actualRequest = (client.innerApiCalls.listClusters as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listClusters as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listClusters without error using callback', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListClustersRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + ]; + client.innerApiCalls.listClusters = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listClusters( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.ICluster[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listClusters as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = 
(client.innerApiCalls.listClusters as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listClusters with error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListClustersRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedError = new Error('expected'); + client.innerApiCalls.listClusters = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listClusters(request), expectedError); + const actualRequest = (client.innerApiCalls.listClusters as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listClusters as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listClustersStream without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListClustersRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); + request.projectId = 
defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + ]; + client.descriptors.page.listClusters.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listClustersStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.dataproc.v1.Cluster[] = []; + stream.on('data', (response: protos.google.cloud.dataproc.v1.Cluster) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listClusters.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listClusters, request)); + assert( + (client.descriptors.page.listClusters.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listClustersStream with error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListClustersRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + 
getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedError = new Error('expected'); + client.descriptors.page.listClusters.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listClustersStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.dataproc.v1.Cluster[] = []; + stream.on('data', (response: protos.google.cloud.dataproc.v1.Cluster) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listClusters.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listClusters, request)); + assert( + (client.descriptors.page.listClusters.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listClusters without error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListClustersRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedResponse = [ + generateSampleMessage(new 
protos.google.cloud.dataproc.v1.Cluster()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), + ]; + client.descriptors.page.listClusters.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.dataproc.v1.ICluster[] = []; + const iterable = client.listClustersAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listClusters.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listClusters.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listClusters with error', async () => { + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListClustersRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedError = new Error('expected'); + client.descriptors.page.listClusters.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listClustersAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.dataproc.v1.ICluster[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + 
}); + assert.deepStrictEqual( + (client.descriptors.page.listClusters.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listClusters.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); + + describe('Path templates', () => { + + describe('batch', () => { + const fakePath = "/rendered/path/batch"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + batch: "batchValue", + }; + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.batchPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.batchPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('batchPath', () => { + const result = client.batchPath("projectValue", "locationValue", "batchValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.batchPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromBatchName', () => { + const result = client.matchProjectFromBatchName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromBatchName', () => { + const result = client.matchLocationFromBatchName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchBatchFromBatchName', () => { + const result = client.matchBatchFromBatchName(fakePath); + assert.strictEqual(result, "batchValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + 
describe('nodeGroup', () => { + const fakePath = "/rendered/path/nodeGroup"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + cluster: "clusterValue", + node_group: "nodeGroupValue", + }; + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, "clusterValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, "nodeGroupValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + 
.getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationAutoscalingPolicyPath', () => { + const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); + 
assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + workflow_template: "workflowTemplateValue", + }; + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationWorkflowTemplatePath', () => { + const result = client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', 
() => { + const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionAutoscalingPolicyPath', () => { + const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + 
.getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + workflow_template: "workflowTemplateValue", + }; + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionWorkflowTemplatePath', () => { + const result = client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "regionValue"); + 
assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('service', () => { + const fakePath = "/rendered/path/service"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + service: "serviceValue", + }; + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.servicePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.servicePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('servicePath', () => { + const result = client.servicePath("projectValue", "locationValue", "serviceValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.servicePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromServiceName', () => { + const result = client.matchProjectFromServiceName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.servicePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromServiceName', () => { + const result = client.matchLocationFromServiceName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.servicePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchServiceFromServiceName', () => { + const result = 
client.matchServiceFromServiceName(fakePath); + assert.strictEqual(result, "serviceValue"); + assert((client.pathTemplates.servicePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_job_controller_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_job_controller_v1.ts new file mode 100644 index 00000000000..875a586cb5f --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_job_controller_v1.ts @@ -0,0 +1,1580 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as jobcontrollerModule from '../src'; + +import {PassThrough} from 'stream'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? 
sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v1.JobControllerClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = jobcontrollerModule.v1.JobControllerClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = 
jobcontrollerModule.v1.JobControllerClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = jobcontrollerModule.v1.JobControllerClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new jobcontrollerModule.v1.JobControllerClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.jobControllerStub, undefined); + await client.initialize(); + assert(client.jobControllerStub); + }); + + it('has close method for the initialized client', done => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.jobControllerStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.jobControllerStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + 
assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('submitJob', () => { + it('invokes submitJob without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Job() + ); + client.innerApiCalls.submitJob = stubSimpleCall(expectedResponse); + const [response] = await client.submitJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.submitJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.submitJob as SinonStub) + 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes submitJob without error using callback', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Job() + ); + client.innerApiCalls.submitJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.submitJob( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IJob|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.submitJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.submitJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes submitJob with error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = 
generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedError = new Error('expected'); + client.innerApiCalls.submitJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.submitJob(request), expectedError); + const actualRequest = (client.innerApiCalls.submitJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.submitJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes submitJob with closed client', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); + request.region = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.submitJob(request), expectedError); + }); + }); + + describe('getJob', () => { + it('invokes getJob without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 
'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Job() + ); + client.innerApiCalls.getJob = stubSimpleCall(expectedResponse); + const [response] = await client.getJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getJob without error using callback', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + 
getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Job() + ); + client.innerApiCalls.getJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getJob( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IJob|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getJob with error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedError = new Error('expected'); + 
client.innerApiCalls.getJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getJob(request), expectedError); + const actualRequest = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getJob with closed client', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getJob(request), expectedError); + }); + }); + + describe('updateJob', () => { + it('invokes updateJob without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + 
getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Job() + ); + client.innerApiCalls.updateJob = stubSimpleCall(expectedResponse); + const [response] = await client.updateJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateJob without error using callback', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Job() + ); 
+ client.innerApiCalls.updateJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.updateJob( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IJob|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateJob with error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.updateJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.updateJob(request), expectedError); + const actualRequest = (client.innerApiCalls.updateJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const 
actualHeaderRequestParams = (client.innerApiCalls.updateJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateJob with closed client', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.updateJob(request), expectedError); + }); + }); + + describe('cancelJob', () => { + it('invokes cancelJob without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CancelJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = 
`project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Job() + ); + client.innerApiCalls.cancelJob = stubSimpleCall(expectedResponse); + const [response] = await client.cancelJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes cancelJob without error using callback', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CancelJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.Job() + ); + client.innerApiCalls.cancelJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.cancelJob( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IJob|null) => { + if (err) { + reject(err); + } else { + 
resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes cancelJob with error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CancelJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.cancelJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.cancelJob(request), expectedError); + const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes cancelJob with closed client', async () => { + const 
client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CancelJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.cancelJob(request), expectedError); + }); + }); + + describe('deleteJob', () => { + it('invokes deleteJob without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteJob = stubSimpleCall(expectedResponse); + const [response] = await client.deleteJob(request); + 
assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteJob without error using callback', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteJob( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = 
(client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteJob with error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteJob(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteJob with closed client', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteJobRequest() + ); + const defaultValue1 = + 
getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['region']); + request.region = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteJob(request), expectedError); + }); + }); + + describe('submitJobAsOperation', () => { + it('invokes submitJobAsOperation without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCall(expectedResponse); + const [operation] = await client.submitJobAsOperation(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes submitJobAsOperation without error using callback', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.submitJobAsOperation( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes submitJobAsOperation with call error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 
'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}`; + const expectedError = new Error('expected'); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.submitJobAsOperation(request), expectedError); + const actualRequest = (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes submitJobAsOperation with LRO error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}`; + const expectedError = new Error('expected'); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCall(undefined, undefined, 
expectedError); + const [operation] = await client.submitJobAsOperation(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkSubmitJobAsOperationProgress without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkSubmitJobAsOperationProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkSubmitJobAsOperationProgress with error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkSubmitJobAsOperationProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + 
}); + }); + + describe('listJobs', () => { + it('invokes listJobs without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + ]; + client.innerApiCalls.listJobs = stubSimpleCall(expectedResponse); + const [response] = await client.listJobs(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listJobs without error using callback', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); + 
request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + ]; + client.innerApiCalls.listJobs = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listJobs( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IJob[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listJobs with error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}`; + const expectedError 
= new Error('expected'); + client.innerApiCalls.listJobs = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listJobs(request), expectedError); + const actualRequest = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listJobsStream without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + ]; + client.descriptors.page.listJobs.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listJobsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.dataproc.v1.Job[] = []; + stream.on('data', (response: protos.google.cloud.dataproc.v1.Job) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await 
promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listJobs, request)); + assert( + (client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listJobsStream with error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}`; + const expectedError = new Error('expected'); + client.descriptors.page.listJobs.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listJobsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.dataproc.v1.Job[] = []; + stream.on('data', (response: protos.google.cloud.dataproc.v1.Job) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listJobs, request)); + assert( + (client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + 
expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listJobs without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), + ]; + client.descriptors.page.listJobs.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.dataproc.v1.IJob[] = []; + const iterable = client.listJobsAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listJobs.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listJobs.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listJobs with error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new 
protos.google.cloud.dataproc.v1.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); + request.region = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1}&region=${defaultValue2}`; + const expectedError = new Error('expected'); + client.descriptors.page.listJobs.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listJobsAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.dataproc.v1.IJob[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listJobs.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listJobs.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); + + describe('Path templates', () => { + + describe('batch', () => { + const fakePath = "/rendered/path/batch"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + batch: "batchValue", + }; + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.batchPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.batchPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('batchPath', () => { + const result = client.batchPath("projectValue", "locationValue", "batchValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.batchPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + 
it('matchProjectFromBatchName', () => { + const result = client.matchProjectFromBatchName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromBatchName', () => { + const result = client.matchLocationFromBatchName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchBatchFromBatchName', () => { + const result = client.matchBatchFromBatchName(fakePath); + assert.strictEqual(result, "batchValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('nodeGroup', () => { + const fakePath = "/rendered/path/nodeGroup"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + cluster: "clusterValue", + node_group: "nodeGroupValue", + }; + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + 
it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, "clusterValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, "nodeGroupValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationAutoscalingPolicyPath', () => { + const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { + const result = 
client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + workflow_template: "workflowTemplateValue", + }; + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationWorkflowTemplatePath', () => { + const result = client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) + 
.getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionAutoscalingPolicyPath', () => { + const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); + assert.strictEqual(result, fakePath); + 
assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + workflow_template: "workflowTemplateValue", + }; + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionWorkflowTemplatePath', () => { + const result = client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", 
"workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_node_group_controller_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_node_group_controller_v1.ts new file mode 100644 index 00000000000..2bf99b38cff --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_node_group_controller_v1.ts @@ -0,0 +1,983 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as nodegroupcontrollerModule from '../src'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage<T extends protobuf.Message>(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubLongRunningCall<ResponseType>(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback<ResponseType>(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +describe('v1.NodeGroupControllerClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = nodegroupcontrollerModule.v1.NodeGroupControllerClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = nodegroupcontrollerModule.v1.NodeGroupControllerClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = nodegroupcontrollerModule.v1.NodeGroupControllerClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new 
nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.nodeGroupControllerStub, undefined); + await client.initialize(); + assert(client.nodeGroupControllerStub); + }); + + it('has close method for the initialized client', done => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.nodeGroupControllerStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.nodeGroupControllerStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else 
{ + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getNodeGroup', () => { + it('invokes getNodeGroup without error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetNodeGroupRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.NodeGroup() + ); + client.innerApiCalls.getNodeGroup = stubSimpleCall(expectedResponse); + const [response] = await client.getNodeGroup(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getNodeGroup without error using callback', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetNodeGroupRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = 
generateSampleMessage( + new protos.google.cloud.dataproc.v1.NodeGroup() + ); + client.innerApiCalls.getNodeGroup = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getNodeGroup( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.INodeGroup|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getNodeGroup with error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetNodeGroupRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.getNodeGroup = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getNodeGroup(request), expectedError); + const actualRequest = (client.innerApiCalls.getNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + 
}); + + it('invokes getNodeGroup with closed client', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetNodeGroupRequest', ['name']); + request.name = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getNodeGroup(request), expectedError); + }); + }); + + describe('createNodeGroup', () => { + it('invokes createNodeGroup without error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateNodeGroupRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.createNodeGroup = stubLongRunningCall(expectedResponse); + const [operation] = await client.createNodeGroup(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.createNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createNodeGroup without error using callback', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateNodeGroupRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.createNodeGroup = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.createNodeGroup( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.createNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createNodeGroup with call error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() + ); + const 
defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateNodeGroupRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.createNodeGroup = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.createNodeGroup(request), expectedError); + const actualRequest = (client.innerApiCalls.createNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createNodeGroup with LRO error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateNodeGroupRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.createNodeGroup = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.createNodeGroup(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.createNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkCreateNodeGroupProgress without error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkCreateNodeGroupProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkCreateNodeGroupProgress with error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkCreateNodeGroupProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('resizeNodeGroup', () => { + it('invokes resizeNodeGroup without error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() + ); + const defaultValue1 = + 
getTypeDefaultValue('.google.cloud.dataproc.v1.ResizeNodeGroupRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.resizeNodeGroup = stubLongRunningCall(expectedResponse); + const [operation] = await client.resizeNodeGroup(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.resizeNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.resizeNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes resizeNodeGroup without error using callback', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ResizeNodeGroupRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.resizeNodeGroup = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.resizeNodeGroup( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + 
assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.resizeNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.resizeNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes resizeNodeGroup with call error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ResizeNodeGroupRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.resizeNodeGroup = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.resizeNodeGroup(request), expectedError); + const actualRequest = (client.innerApiCalls.resizeNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.resizeNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes resizeNodeGroup with LRO error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() + ); + const defaultValue1 = + 
getTypeDefaultValue('.google.cloud.dataproc.v1.ResizeNodeGroupRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.resizeNodeGroup = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.resizeNodeGroup(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.resizeNodeGroup as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.resizeNodeGroup as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkResizeNodeGroupProgress without error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkResizeNodeGroupProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkResizeNodeGroupProgress with error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + 
client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkResizeNodeGroupProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('Path templates', () => { + + describe('batch', () => { + const fakePath = "/rendered/path/batch"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + batch: "batchValue", + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.batchPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.batchPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('batchPath', () => { + const result = client.batchPath("projectValue", "locationValue", "batchValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.batchPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromBatchName', () => { + const result = client.matchProjectFromBatchName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromBatchName', () => { + const result = client.matchLocationFromBatchName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchBatchFromBatchName', () => { + const result = client.matchBatchFromBatchName(fakePath); + assert.strictEqual(result, "batchValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + 
describe('clusterRegion', () => { + const fakePath = "/rendered/path/clusterRegion"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + cluster: "clusterValue", + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.clusterRegionPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.clusterRegionPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('clusterRegionPath', () => { + const result = client.clusterRegionPath("projectValue", "regionValue", "clusterValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.clusterRegionPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromClusterRegionName', () => { + const result = client.matchProjectFromClusterRegionName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.clusterRegionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromClusterRegionName', () => { + const result = client.matchRegionFromClusterRegionName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.clusterRegionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchClusterFromClusterRegionName', () => { + const result = client.matchClusterFromClusterRegionName(fakePath); + assert.strictEqual(result, "clusterValue"); + assert((client.pathTemplates.clusterRegionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('nodeGroup', () => { + const fakePath = "/rendered/path/nodeGroup"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + cluster: "clusterValue", + node_group: "nodeGroupValue", + }; + const client = new 
nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, "clusterValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, "nodeGroupValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('project', () => { + const fakePath = "/rendered/path/project"; + const expectedParameters = { + project: "projectValue", + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + 
credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectPath', () => { + const result = client.projectPath("projectValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectName', () => { + const result = client.matchProjectFromProjectName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationAutoscalingPolicyPath', () => { + const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { + const result = 
client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + workflow_template: "workflowTemplateValue", + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationWorkflowTemplatePath', () => { + const result = client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) + 
.getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionAutoscalingPolicyPath', () => { + const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); + assert.strictEqual(result, 
fakePath); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + workflow_template: "workflowTemplateValue", + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionWorkflowTemplatePath', () => { + const result = 
client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('region', () => { + const fakePath = "/rendered/path/region"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.regionPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.regionPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('regionPath', () => { + const result = client.regionPath("projectValue", "regionValue"); + assert.strictEqual(result, fakePath); + 
assert((client.pathTemplates.regionPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromRegionName', () => { + const result = client.matchProjectFromRegionName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.regionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromRegionName', () => { + const result = client.matchRegionFromRegionName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.regionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_workflow_template_service_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_workflow_template_service_v1.ts new file mode 100644 index 00000000000..72c13b7199a --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_workflow_template_service_v1.ts @@ -0,0 +1,1557 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as workflowtemplateserviceModule from '../src'; + +import {PassThrough} from 'stream'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? 
sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v1.WorkflowTemplateServiceClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + 
const apiEndpoint = workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.workflowTemplateServiceStub, undefined); + await client.initialize(); + assert(client.workflowTemplateServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.workflowTemplateServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.workflowTemplateServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 
'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('createWorkflowTemplate', () => { + it('invokes createWorkflowTemplate without error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.WorkflowTemplate() + ); + client.innerApiCalls.createWorkflowTemplate = stubSimpleCall(expectedResponse); + const [response] = await client.createWorkflowTemplate(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.createWorkflowTemplate as SinonStub) + 
.getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createWorkflowTemplate without error using callback', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.WorkflowTemplate() + ); + client.innerApiCalls.createWorkflowTemplate = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.createWorkflowTemplate( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IWorkflowTemplate|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.createWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createWorkflowTemplate with error', async () => { + const client = new 
workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.createWorkflowTemplate = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.createWorkflowTemplate(request), expectedError); + const actualRequest = (client.innerApiCalls.createWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.createWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createWorkflowTemplate with closed client', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest', ['parent']); + request.parent = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.createWorkflowTemplate(request), expectedError); + }); + }); + + describe('getWorkflowTemplate', () => { + it('invokes getWorkflowTemplate without error', async () => { + const client = new 
workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.WorkflowTemplate() + ); + client.innerApiCalls.getWorkflowTemplate = stubSimpleCall(expectedResponse); + const [response] = await client.getWorkflowTemplate(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getWorkflowTemplate without error using callback', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.WorkflowTemplate() + ); + client.innerApiCalls.getWorkflowTemplate = 
stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getWorkflowTemplate( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IWorkflowTemplate|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getWorkflowTemplate with error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.getWorkflowTemplate = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getWorkflowTemplate(request), expectedError); + const actualRequest = (client.innerApiCalls.getWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes 
getWorkflowTemplate with closed client', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.GetWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getWorkflowTemplate(request), expectedError); + }); + }); + + describe('updateWorkflowTemplate', () => { + it('invokes updateWorkflowTemplate without error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest() + ); + request.template ??= {}; + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest', ['template', 'name']); + request.template.name = defaultValue1; + const expectedHeaderRequestParams = `template.name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.WorkflowTemplate() + ); + client.innerApiCalls.updateWorkflowTemplate = stubSimpleCall(expectedResponse); + const [response] = await client.updateWorkflowTemplate(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) + 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateWorkflowTemplate without error using callback', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest() + ); + request.template ??= {}; + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest', ['template', 'name']); + request.template.name = defaultValue1; + const expectedHeaderRequestParams = `template.name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.WorkflowTemplate() + ); + client.innerApiCalls.updateWorkflowTemplate = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.updateWorkflowTemplate( + request, + (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IWorkflowTemplate|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateWorkflowTemplate with error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 
'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest() + ); + request.template ??= {}; + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest', ['template', 'name']); + request.template.name = defaultValue1; + const expectedHeaderRequestParams = `template.name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.updateWorkflowTemplate = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.updateWorkflowTemplate(request), expectedError); + const actualRequest = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateWorkflowTemplate with closed client', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest() + ); + request.template ??= {}; + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest', ['template', 'name']); + request.template.name = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.updateWorkflowTemplate(request), expectedError); + }); + }); + + describe('deleteWorkflowTemplate', () => { + it('invokes deleteWorkflowTemplate without error', async () => { + const client = new 
workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteWorkflowTemplate = stubSimpleCall(expectedResponse); + const [response] = await client.deleteWorkflowTemplate(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteWorkflowTemplate without error using callback', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteWorkflowTemplate = stubSimpleCallWithCallback(expectedResponse); + 
const promise = new Promise((resolve, reject) => { + client.deleteWorkflowTemplate( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteWorkflowTemplate with error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteWorkflowTemplate = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteWorkflowTemplate(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteWorkflowTemplate with closed client', async 
() => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteWorkflowTemplate(request), expectedError); + }); + }); + + describe('instantiateWorkflowTemplate', () => { + it('invokes instantiateWorkflowTemplate without error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.instantiateWorkflowTemplate = stubLongRunningCall(expectedResponse); + const [operation] = await client.instantiateWorkflowTemplate(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) + 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes instantiateWorkflowTemplate without error using callback', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.instantiateWorkflowTemplate = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.instantiateWorkflowTemplate( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes instantiateWorkflowTemplate with call error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + 
projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.instantiateWorkflowTemplate = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.instantiateWorkflowTemplate(request), expectedError); + const actualRequest = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes instantiateWorkflowTemplate with LRO error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest', ['name']); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.instantiateWorkflowTemplate = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.instantiateWorkflowTemplate(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = 
(client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkInstantiateWorkflowTemplateProgress without error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkInstantiateWorkflowTemplateProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkInstantiateWorkflowTemplateProgress with error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkInstantiateWorkflowTemplateProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('instantiateInlineWorkflowTemplate', () => { + it('invokes 
instantiateInlineWorkflowTemplate without error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.instantiateInlineWorkflowTemplate = stubLongRunningCall(expectedResponse); + const [operation] = await client.instantiateInlineWorkflowTemplate(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes instantiateInlineWorkflowTemplate without error using callback', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest', ['parent']); + request.parent = defaultValue1; 
+ const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.instantiateInlineWorkflowTemplate = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.instantiateInlineWorkflowTemplate( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes instantiateInlineWorkflowTemplate with call error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.instantiateInlineWorkflowTemplate = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.instantiateInlineWorkflowTemplate(request), expectedError); + const actualRequest = 
(client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes instantiateInlineWorkflowTemplate with LRO error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.instantiateInlineWorkflowTemplate = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.instantiateInlineWorkflowTemplate(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkInstantiateInlineWorkflowTemplateProgress without error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + 
projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkInstantiateInlineWorkflowTemplateProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkInstantiateInlineWorkflowTemplateProgress with error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkInstantiateInlineWorkflowTemplateProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); + + describe('listWorkflowTemplates', () => { + it('invokes listWorkflowTemplates without error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ + 
generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + ]; + client.innerApiCalls.listWorkflowTemplates = stubSimpleCall(expectedResponse); + const [response] = await client.listWorkflowTemplates(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listWorkflowTemplates as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listWorkflowTemplates as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listWorkflowTemplates without error using callback', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + ]; + client.innerApiCalls.listWorkflowTemplates = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listWorkflowTemplates( + request, + (err?: Error|null, result?: 
protos.google.cloud.dataproc.v1.IWorkflowTemplate[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listWorkflowTemplates as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listWorkflowTemplates as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listWorkflowTemplates with error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.listWorkflowTemplates = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listWorkflowTemplates(request), expectedError); + const actualRequest = (client.innerApiCalls.listWorkflowTemplates as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listWorkflowTemplates as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listWorkflowTemplatesStream without error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + 
credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + ]; + client.descriptors.page.listWorkflowTemplates.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listWorkflowTemplatesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.dataproc.v1.WorkflowTemplate[] = []; + stream.on('data', (response: protos.google.cloud.dataproc.v1.WorkflowTemplate) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listWorkflowTemplates.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listWorkflowTemplates, request)); + assert( + (client.descriptors.page.listWorkflowTemplates.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listWorkflowTemplatesStream with error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request 
= generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.descriptors.page.listWorkflowTemplates.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listWorkflowTemplatesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.dataproc.v1.WorkflowTemplate[] = []; + stream.on('data', (response: protos.google.cloud.dataproc.v1.WorkflowTemplate) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listWorkflowTemplates.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listWorkflowTemplates, request)); + assert( + (client.descriptors.page.listWorkflowTemplates.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listWorkflowTemplates without error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = [ + generateSampleMessage(new 
protos.google.cloud.dataproc.v1.WorkflowTemplate()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), + ]; + client.descriptors.page.listWorkflowTemplates.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.dataproc.v1.IWorkflowTemplate[] = []; + const iterable = client.listWorkflowTemplatesAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listWorkflowTemplates.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listWorkflowTemplates.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listWorkflowTemplates with error', async () => { + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.descriptors.page.listWorkflowTemplates.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listWorkflowTemplatesAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.dataproc.v1.IWorkflowTemplate[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + 
(client.descriptors.page.listWorkflowTemplates.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listWorkflowTemplates.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); + + describe('Path templates', () => { + + describe('batch', () => { + const fakePath = "/rendered/path/batch"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + batch: "batchValue", + }; + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.batchPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.batchPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('batchPath', () => { + const result = client.batchPath("projectValue", "locationValue", "batchValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.batchPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromBatchName', () => { + const result = client.matchProjectFromBatchName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromBatchName', () => { + const result = client.matchLocationFromBatchName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchBatchFromBatchName', () => { + const result = client.matchBatchFromBatchName(fakePath); + assert.strictEqual(result, "batchValue"); + assert((client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + 
describe('nodeGroup', () => { + const fakePath = "/rendered/path/nodeGroup"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + cluster: "clusterValue", + node_group: "nodeGroupValue", + }; + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, "clusterValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, "nodeGroupValue"); + assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + 
.getCall(-1).calledWith(fakePath)); + }); + }); + + describe('project', () => { + const fakePath = "/rendered/path/project"; + const expectedParameters = { + project: "projectValue", + }; + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectPath', () => { + const result = client.projectPath("projectValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectName', () => { + const result = client.matchProjectFromProjectName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationAutoscalingPolicyPath', () => { + const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); + assert.strictEqual(result, 
fakePath); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectLocationWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + workflow_template: "workflowTemplateValue", + }; + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationWorkflowTemplatePath', () => { + const result = 
client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionAutoscalingPolicy', () => { + const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + autoscaling_policy: "autoscalingPolicyValue", + }; + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = + 
sinon.stub().returns(expectedParameters); + + it('projectRegionAutoscalingPolicyPath', () => { + const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { + const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, "autoscalingPolicyValue"); + assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('projectRegionWorkflowTemplate', () => { + const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + workflow_template: "workflowTemplateValue", + }; + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = + 
sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionWorkflowTemplatePath', () => { + const result = client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", "workflowTemplateValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { + const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, "workflowTemplateValue"); + assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('region', () => { + const fakePath = "/rendered/path/region"; + const expectedParameters = { + project: "projectValue", + region: "regionValue", + }; + const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.regionPathTemplate.render = + sinon.stub().returns(fakePath); + 
client.pathTemplates.regionPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('regionPath', () => { + const result = client.regionPath("projectValue", "regionValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.regionPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromRegionName', () => { + const result = client.matchProjectFromRegionName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.regionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchRegionFromRegionName', () => { + const result = client.matchRegionFromRegionName(fakePath); + assert.strictEqual(result, "regionValue"); + assert((client.pathTemplates.regionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/tsconfig.json b/owl-bot-staging/google-cloud-dataproc/v1/tsconfig.json new file mode 100644 index 00000000000..c78f1c884ef --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/webpack.config.js b/owl-bot-staging/google-cloud-dataproc/v1/webpack.config.js new file mode 100644 index 00000000000..050b272c0e0 --- /dev/null +++ b/owl-bot-staging/google-cloud-dataproc/v1/webpack.config.js @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'dataproc', + filename: './dataproc.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; From f2bc200c129f320a82c59a4877623dd8fd778b9d Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 13 Dec 2022 01:16:02 +0000 Subject: [PATCH 2/2] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot=20po?= =?UTF-8?q?st-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- .../google-cloud-dataproc/v1/.eslintignore | 7 - .../google-cloud-dataproc/v1/.eslintrc.json | 3 - .../google-cloud-dataproc/v1/.gitignore | 14 - .../google-cloud-dataproc/v1/.jsdoc.js | 55 - .../google-cloud-dataproc/v1/.mocharc.js | 33 - .../google-cloud-dataproc/v1/.prettierrc.js | 22 - 
.../google-cloud-dataproc/v1/README.md | 1 - .../v1/linkinator.config.json | 16 - .../google-cloud-dataproc/v1/package.json | 69 - .../dataproc/v1/autoscaling_policies.proto | 366 - .../google/cloud/dataproc/v1/batches.proto | 372 - .../google/cloud/dataproc/v1/clusters.proto | 1473 -- .../google/cloud/dataproc/v1/jobs.proto | 951 - .../google/cloud/dataproc/v1/operations.proto | 166 - .../google/cloud/dataproc/v1/shared.proto | 341 - .../dataproc/v1/workflow_templates.proto | 807 - ...olicy_service.create_autoscaling_policy.js | 73 - ...olicy_service.delete_autoscaling_policy.js | 68 - ...g_policy_service.get_autoscaling_policy.js | 68 - ...olicy_service.list_autoscaling_policies.js | 80 - ...olicy_service.update_autoscaling_policy.js | 61 - .../v1/batch_controller.create_batch.js | 86 - .../v1/batch_controller.delete_batch.js | 61 - .../v1/batch_controller.get_batch.js | 61 - .../v1/batch_controller.list_batches.js | 74 - .../v1/cluster_controller.create_cluster.js | 90 - .../v1/cluster_controller.delete_cluster.js | 91 - .../v1/cluster_controller.diagnose_cluster.js | 73 - .../v1/cluster_controller.get_cluster.js | 72 - .../v1/cluster_controller.list_clusters.js | 95 - .../v1/cluster_controller.start_cluster.js | 91 - .../v1/cluster_controller.stop_cluster.js | 91 - .../v1/cluster_controller.update_cluster.js | 154 - .../generated/v1/job_controller.cancel_job.js | 72 - .../generated/v1/job_controller.delete_job.js | 72 - .../generated/v1/job_controller.get_job.js | 72 - .../generated/v1/job_controller.list_jobs.js | 102 - .../generated/v1/job_controller.submit_job.js | 85 - .../job_controller.submit_job_as_operation.js | 86 - .../generated/v1/job_controller.update_job.js | 87 - ...pet_metadata.google.cloud.dataproc.v1.json | 1679 -- ...mplate_service.create_workflow_template.js | 73 - ...mplate_service.delete_workflow_template.js | 74 - ..._template_service.get_workflow_template.js | 74 - ...ce.instantiate_inline_workflow_template.js | 84 - 
...e_service.instantiate_workflow_template.js | 92 - ...emplate_service.list_workflow_templates.js | 79 - ...mplate_service.update_workflow_template.js | 62 - .../google-cloud-dataproc/v1/src/index.ts | 35 - .../v1/autoscaling_policy_service_client.ts | 1240 -- ...oscaling_policy_service_client_config.json | 51 - ...autoscaling_policy_service_proto_list.json | 10 - .../v1/src/v1/batch_controller_client.ts | 1183 -- .../v1/batch_controller_client_config.json | 42 - .../src/v1/batch_controller_proto_list.json | 10 - .../v1/src/v1/cluster_controller_client.ts | 1840 -- .../v1/cluster_controller_client_config.json | 72 - .../src/v1/cluster_controller_proto_list.json | 10 - .../v1/src/v1/gapic_metadata.json | 453 - .../google-cloud-dataproc/v1/src/v1/index.ts | 24 - .../v1/src/v1/job_controller_client.ts | 1465 -- .../src/v1/job_controller_client_config.json | 69 - .../v1/src/v1/job_controller_proto_list.json | 10 - .../v1/workflow_template_service_client.ts | 1561 -- ...rkflow_template_service_client_config.json | 69 - .../workflow_template_service_proto_list.json | 10 - .../system-test/fixtures/sample/src/index.js | 32 - .../system-test/fixtures/sample/src/index.ts | 62 - .../v1/system-test/install.ts | 49 - .../gapic_autoscaling_policy_service_v1.ts | 1233 -- .../v1/test/gapic_batch_controller_v1.ts | 1183 -- .../v1/test/gapic_cluster_controller_v1.ts | 2000 -- .../v1/test/gapic_job_controller_v1.ts | 1580 -- .../v1/test/gapic_node_group_controller_v1.ts | 983 - .../gapic_workflow_template_service_v1.ts | 1557 -- .../google-cloud-dataproc/v1/tsconfig.json | 19 - .../v1/webpack.config.js | 64 - packages/google-cloud-dataproc/README.md | 3 + .../google/cloud/dataproc/v1/clusters.proto | 304 +- .../google/cloud/dataproc/v1/jobs.proto | 93 +- .../cloud/dataproc/v1/node_groups.proto | 0 .../google/cloud/dataproc/v1/operations.proto | 54 +- .../google-cloud-dataproc/protos/protos.d.ts | 939 +- .../google-cloud-dataproc/protos/protos.js | 15060 +++++++++------- 
.../google-cloud-dataproc/protos/protos.json | 328 +- .../google-cloud-dataproc/samples/README.md | 54 + .../v1/cluster_controller.create_cluster.js | 7 +- .../v1/cluster_controller.delete_cluster.js | 4 +- .../v1/cluster_controller.start_cluster.js | 4 +- .../v1/cluster_controller.stop_cluster.js | 4 +- .../v1/cluster_controller.update_cluster.js | 4 +- ...node_group_controller.create_node_group.js | 0 .../node_group_controller.get_node_group.js | 0 ...node_group_controller.resize_node_group.js | 0 ...pet_metadata.google.cloud.dataproc.v1.json | 146 +- packages/google-cloud-dataproc/src/index.ts | 4 + .../v1/autoscaling_policy_service_client.ts | 76 + ...autoscaling_policy_service_proto_list.json | 1 + .../src/v1/batch_controller_client.ts | 76 + .../src/v1/batch_controller_proto_list.json | 1 + .../src/v1/cluster_controller_client.ts | 102 +- .../src/v1/cluster_controller_proto_list.json | 1 + .../src/v1/gapic_metadata.json | 44 + .../google-cloud-dataproc/src/v1/index.ts | 1 + .../src/v1/job_controller_client.ts | 76 + .../src/v1/job_controller_proto_list.json | 1 + .../src/v1/node_group_controller_client.ts | 1073 +- .../node_group_controller_client_config.json | 0 .../v1/node_group_controller_proto_list.json | 0 .../v1/workflow_template_service_client.ts | 76 + .../workflow_template_service_proto_list.json | 1 + .../system-test/fixtures/sample/src/index.js | 1 + .../system-test/fixtures/sample/src/index.ts | 9 + .../gapic_autoscaling_policy_service_v1.ts | 77 + .../test/gapic_batch_controller_v1.ts | 76 + .../test/gapic_cluster_controller_v1.ts | 76 + .../test/gapic_job_controller_v1.ts | 76 + .../test/gapic_node_group_controller_v1.ts | 1390 ++ .../gapic_workflow_template_service_v1.ts | 77 + 119 files changed, 13392 insertions(+), 32721 deletions(-) delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.eslintignore delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.eslintrc.json delete mode 100644 
owl-bot-staging/google-cloud-dataproc/v1/.gitignore delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.jsdoc.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.mocharc.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/.prettierrc.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/README.md delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/linkinator.config.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/package.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/autoscaling_policies.proto delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/batches.proto delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/clusters.proto delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/jobs.proto delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/operations.proto delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/shared.proto delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/workflow_templates.proto delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.create_autoscaling_policy.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.delete_autoscaling_policy.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.get_autoscaling_policy.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.list_autoscaling_policies.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.update_autoscaling_policy.js delete mode 100644 
owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.create_batch.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.delete_batch.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.get_batch.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.list_batches.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.create_cluster.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.delete_cluster.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.diagnose_cluster.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.get_cluster.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.list_clusters.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.start_cluster.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.stop_cluster.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.update_cluster.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.cancel_job.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.delete_job.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.get_job.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.list_jobs.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job.js delete mode 100644 
owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job_as_operation.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.update_job.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.create_workflow_template.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.delete_workflow_template.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.get_workflow_template.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_workflow_template.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.list_workflow_templates.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.update_workflow_template.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/index.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client_config.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_proto_list.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client_config.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_proto_list.json 
delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client_config.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_proto_list.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/gapic_metadata.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/index.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client_config.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_proto_list.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client_config.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_proto_list.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.js delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/system-test/install.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_autoscaling_policy_service_v1.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_batch_controller_v1.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_cluster_controller_v1.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_job_controller_v1.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_node_group_controller_v1.ts delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/test/gapic_workflow_template_service_v1.ts delete mode 100644 
owl-bot-staging/google-cloud-dataproc/v1/tsconfig.json delete mode 100644 owl-bot-staging/google-cloud-dataproc/v1/webpack.config.js rename {owl-bot-staging/google-cloud-dataproc/v1 => packages/google-cloud-dataproc}/protos/google/cloud/dataproc/v1/node_groups.proto (100%) rename {owl-bot-staging/google-cloud-dataproc/v1 => packages/google-cloud-dataproc}/samples/generated/v1/node_group_controller.create_node_group.js (100%) rename {owl-bot-staging/google-cloud-dataproc/v1 => packages/google-cloud-dataproc}/samples/generated/v1/node_group_controller.get_node_group.js (100%) rename {owl-bot-staging/google-cloud-dataproc/v1 => packages/google-cloud-dataproc}/samples/generated/v1/node_group_controller.resize_node_group.js (100%) rename {owl-bot-staging/google-cloud-dataproc/v1 => packages/google-cloud-dataproc}/src/v1/node_group_controller_client.ts (50%) rename {owl-bot-staging/google-cloud-dataproc/v1 => packages/google-cloud-dataproc}/src/v1/node_group_controller_client_config.json (100%) rename {owl-bot-staging/google-cloud-dataproc/v1 => packages/google-cloud-dataproc}/src/v1/node_group_controller_proto_list.json (100%) create mode 100644 packages/google-cloud-dataproc/test/gapic_node_group_controller_v1.ts diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.eslintignore b/owl-bot-staging/google-cloud-dataproc/v1/.eslintignore deleted file mode 100644 index cfc348ec4d1..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/.eslintignore +++ /dev/null @@ -1,7 +0,0 @@ -**/node_modules -**/.coverage -build/ -docs/ -protos/ -system-test/ -samples/generated/ diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.eslintrc.json b/owl-bot-staging/google-cloud-dataproc/v1/.eslintrc.json deleted file mode 100644 index 78215349546..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/.eslintrc.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "./node_modules/gts" -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.gitignore 
b/owl-bot-staging/google-cloud-dataproc/v1/.gitignore deleted file mode 100644 index 5d32b23782f..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -**/*.log -**/node_modules -.coverage -coverage -.nyc_output -docs/ -out/ -build/ -system-test/secrets.js -system-test/*key.json -*.lock -.DS_Store -package-lock.json -__pycache__ diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.jsdoc.js b/owl-bot-staging/google-cloud-dataproc/v1/.jsdoc.js deleted file mode 100644 index 2fa0c39341c..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/.jsdoc.js +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -'use strict'; - -module.exports = { - opts: { - readme: './README.md', - package: './package.json', - template: './node_modules/jsdoc-fresh', - recurse: true, - verbose: true, - destination: './docs/' - }, - plugins: [ - 'plugins/markdown', - 'jsdoc-region-tag' - ], - source: { - excludePattern: '(^|\\/|\\\\)[._]', - include: [ - 'build/src', - 'protos' - ], - includePattern: '\\.js$' - }, - templates: { - copyright: 'Copyright 2022 Google LLC', - includeDate: false, - sourceFiles: false, - systemName: '@google-cloud/dataproc', - theme: 'lumen', - default: { - outputSourceFiles: false - } - }, - markdown: { - idInHeadings: true - } -}; diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.mocharc.js b/owl-bot-staging/google-cloud-dataproc/v1/.mocharc.js deleted file mode 100644 index 481c522b00f..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/.mocharc.js +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -const config = { - "enable-source-maps": true, - "throw-deprecation": true, - "timeout": 10000 -} -if (process.env.MOCHA_THROW_DEPRECATION === 'false') { - delete config['throw-deprecation']; -} -if (process.env.MOCHA_REPORTER) { - config.reporter = process.env.MOCHA_REPORTER; -} -if (process.env.MOCHA_REPORTER_OUTPUT) { - config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; -} -module.exports = config diff --git a/owl-bot-staging/google-cloud-dataproc/v1/.prettierrc.js b/owl-bot-staging/google-cloud-dataproc/v1/.prettierrc.js deleted file mode 100644 index 494e147865d..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/.prettierrc.js +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - -module.exports = { - ...require('gts/.prettierrc.json') -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/README.md b/owl-bot-staging/google-cloud-dataproc/v1/README.md deleted file mode 100644 index 3e8aa9dfae5..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/README.md +++ /dev/null @@ -1 +0,0 @@ -Dataproc: Nodejs Client diff --git a/owl-bot-staging/google-cloud-dataproc/v1/linkinator.config.json b/owl-bot-staging/google-cloud-dataproc/v1/linkinator.config.json deleted file mode 100644 index befd23c8633..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/linkinator.config.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "recurse": true, - "skip": [ - "https://codecov.io/gh/googleapis/", - "www.googleapis.com", - "img.shields.io", - "https://console.cloud.google.com/cloudshell", - "https://support.google.com" - ], - "silent": true, - "concurrency": 5, - "retry": true, - "retryErrors": true, - "retryErrorsCount": 5, - "retryErrorsJitter": 3000 -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/package.json b/owl-bot-staging/google-cloud-dataproc/v1/package.json deleted file mode 100644 index 058a2a5d9ec..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/package.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "name": "@google-cloud/dataproc", - "version": "0.1.0", - "description": "Dataproc client for Node.js", - "repository": "googleapis/nodejs-dataproc", - "license": "Apache-2.0", - "author": "Google LLC", - "main": "build/src/index.js", - "files": [ - "build/src", - "build/protos" - ], - "keywords": [ - "google apis client", - "google api client", - "google apis", - "google api", - "google", - "google cloud platform", - "google cloud", - "cloud", - "google dataproc", - "dataproc", - "autoscaling policy service", - "batch controller", - "cluster controller", - "job controller", - "node group controller", - "workflow template service" - ], - "scripts": { - "clean": "gts clean", - "compile": "tsc -p . 
&& cp -r protos build/ && minifyProtoJson", - "compile-protos": "compileProtos src", - "docs": "jsdoc -c .jsdoc.js", - "predocs-test": "npm run docs", - "docs-test": "linkinator docs", - "fix": "gts fix", - "lint": "gts check", - "prepare": "npm run compile-protos && npm run compile", - "system-test": "c8 mocha build/system-test", - "test": "c8 mocha build/test" - }, - "dependencies": { - "google-gax": "^3.5.2" - }, - "devDependencies": { - "@types/mocha": "^9.1.1", - "@types/node": "^16.11.62", - "@types/sinon": "^10.0.13", - "c8": "^7.12.0", - "gts": "^3.1.1", - "jsdoc": "^3.6.11", - "jsdoc-fresh": "^2.0.1", - "jsdoc-region-tag": "^2.0.1", - "linkinator": "^4.0.3", - "mocha": "^10.0.0", - "null-loader": "^4.0.1", - "pack-n-play": "^1.0.0-2", - "sinon": "^14.0.0", - "ts-loader": "^8.4.0", - "typescript": "^4.8.3", - "webpack": "^4.46.0", - "webpack-cli": "^4.10.0" - }, - "engines": { - "node": ">=v12" - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/autoscaling_policies.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/autoscaling_policies.proto deleted file mode 100644 index 18b2f7df36d..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/autoscaling_policies.proto +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.cloud.dataproc.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; -option java_multiple_files = true; -option java_outer_classname = "AutoscalingPoliciesProto"; -option java_package = "com.google.cloud.dataproc.v1"; -option (google.api.resource_definition) = { - type: "dataproc.googleapis.com/Region" - pattern: "projects/{project}/regions/{region}" -}; - -// The API interface for managing autoscaling policies in the -// Dataproc API. -service AutoscalingPolicyService { - option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; - - // Creates new autoscaling policy. - rpc CreateAutoscalingPolicy(CreateAutoscalingPolicyRequest) returns (AutoscalingPolicy) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/locations/*}/autoscalingPolicies" - body: "policy" - additional_bindings { - post: "/v1/{parent=projects/*/regions/*}/autoscalingPolicies" - body: "policy" - } - }; - option (google.api.method_signature) = "parent,policy"; - } - - // Updates (replaces) autoscaling policy. - // - // Disabled check for update_mask, because all updates will be full - // replacements. - rpc UpdateAutoscalingPolicy(UpdateAutoscalingPolicyRequest) returns (AutoscalingPolicy) { - option (google.api.http) = { - put: "/v1/{policy.name=projects/*/locations/*/autoscalingPolicies/*}" - body: "policy" - additional_bindings { - put: "/v1/{policy.name=projects/*/regions/*/autoscalingPolicies/*}" - body: "policy" - } - }; - option (google.api.method_signature) = "policy"; - } - - // Retrieves autoscaling policy. 
- rpc GetAutoscalingPolicy(GetAutoscalingPolicyRequest) returns (AutoscalingPolicy) { - option (google.api.http) = { - get: "/v1/{name=projects/*/locations/*/autoscalingPolicies/*}" - additional_bindings { - get: "/v1/{name=projects/*/regions/*/autoscalingPolicies/*}" - } - }; - option (google.api.method_signature) = "name"; - } - - // Lists autoscaling policies in the project. - rpc ListAutoscalingPolicies(ListAutoscalingPoliciesRequest) returns (ListAutoscalingPoliciesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/locations/*}/autoscalingPolicies" - additional_bindings { - get: "/v1/{parent=projects/*/regions/*}/autoscalingPolicies" - } - }; - option (google.api.method_signature) = "parent"; - } - - // Deletes an autoscaling policy. It is an error to delete an autoscaling - // policy that is in use by one or more clusters. - rpc DeleteAutoscalingPolicy(DeleteAutoscalingPolicyRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/locations/*/autoscalingPolicies/*}" - additional_bindings { - delete: "/v1/{name=projects/*/regions/*/autoscalingPolicies/*}" - } - }; - option (google.api.method_signature) = "name"; - } -} - -// Describes an autoscaling policy for Dataproc cluster autoscaler. -message AutoscalingPolicy { - option (google.api.resource) = { - type: "dataproc.googleapis.com/AutoscalingPolicy" - pattern: "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}" - pattern: "projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}" - }; - - // Required. The policy id. - // - // The id must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). Cannot begin or end with underscore - // or hyphen. Must consist of between 3 and 50 characters. - // - string id = 1; - - // Output only. The "resource name" of the autoscaling policy, as described - // in https://cloud.google.com/apis/design/resource_names. 
- // - // * For `projects.regions.autoscalingPolicies`, the resource name of the - // policy has the following format: - // `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` - // - // * For `projects.locations.autoscalingPolicies`, the resource name of the - // policy has the following format: - // `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` - string name = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Autoscaling algorithm for policy. - oneof algorithm { - BasicAutoscalingAlgorithm basic_algorithm = 3 [(google.api.field_behavior) = REQUIRED]; - } - - // Required. Describes how the autoscaler will operate for primary workers. - InstanceGroupAutoscalingPolicyConfig worker_config = 4 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Describes how the autoscaler will operate for secondary workers. - InstanceGroupAutoscalingPolicyConfig secondary_worker_config = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The labels to associate with this autoscaling policy. - // Label **keys** must contain 1 to 63 characters, and must conform to - // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - // Label **values** may be empty, but, if present, must contain 1 to 63 - // characters, and must conform to [RFC - // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be - // associated with an autoscaling policy. - map labels = 6 [(google.api.field_behavior) = OPTIONAL]; -} - -// Basic algorithm for autoscaling. -message BasicAutoscalingAlgorithm { - oneof config { - // Required. YARN autoscaling configuration. - BasicYarnAutoscalingConfig yarn_config = 1 [(google.api.field_behavior) = REQUIRED]; - } - - // Optional. Duration between scaling events. A scaling period starts after - // the update operation from the previous event has completed. - // - // Bounds: [2m, 1d]. Default: 2m. 
- google.protobuf.Duration cooldown_period = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// Basic autoscaling configurations for YARN. -message BasicYarnAutoscalingConfig { - // Required. Timeout for YARN graceful decommissioning of Node Managers. - // Specifies the duration to wait for jobs to complete before forcefully - // removing workers (and potentially interrupting jobs). Only applicable to - // downscaling operations. - // - // Bounds: [0s, 1d]. - google.protobuf.Duration graceful_decommission_timeout = 5 [(google.api.field_behavior) = REQUIRED]; - - // Required. Fraction of average YARN pending memory in the last cooldown period - // for which to add workers. A scale-up factor of 1.0 will result in scaling - // up so that there is no pending memory remaining after the update (more - // aggressive scaling). A scale-up factor closer to 0 will result in a smaller - // magnitude of scaling up (less aggressive scaling). - // See [How autoscaling - // works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) - // for more information. - // - // Bounds: [0.0, 1.0]. - double scale_up_factor = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. Fraction of average YARN pending memory in the last cooldown period - // for which to remove workers. A scale-down factor of 1 will result in - // scaling down so that there is no available memory remaining after the - // update (more aggressive scaling). A scale-down factor of 0 disables - // removing workers, which can be beneficial for autoscaling a single job. - // See [How autoscaling - // works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) - // for more information. - // - // Bounds: [0.0, 1.0]. - double scale_down_factor = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Minimum scale-up threshold as a fraction of total cluster size - // before scaling occurs. 
For example, in a 20-worker cluster, a threshold of - // 0.1 means the autoscaler must recommend at least a 2-worker scale-up for - // the cluster to scale. A threshold of 0 means the autoscaler will scale up - // on any recommended change. - // - // Bounds: [0.0, 1.0]. Default: 0.0. - double scale_up_min_worker_fraction = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Minimum scale-down threshold as a fraction of total cluster size - // before scaling occurs. For example, in a 20-worker cluster, a threshold of - // 0.1 means the autoscaler must recommend at least a 2 worker scale-down for - // the cluster to scale. A threshold of 0 means the autoscaler will scale down - // on any recommended change. - // - // Bounds: [0.0, 1.0]. Default: 0.0. - double scale_down_min_worker_fraction = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// Configuration for the size bounds of an instance group, including its -// proportional size to other groups. -message InstanceGroupAutoscalingPolicyConfig { - // Optional. Minimum number of instances for this group. - // - // Primary workers - Bounds: [2, max_instances]. Default: 2. - // Secondary workers - Bounds: [0, max_instances]. Default: 0. - int32 min_instances = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Required. Maximum number of instances for this group. Required for primary - // workers. Note that by default, clusters will not use secondary workers. - // Required for secondary workers if the minimum secondary instances is set. - // - // Primary workers - Bounds: [min_instances, ). - // Secondary workers - Bounds: [min_instances, ). Default: 0. - int32 max_instances = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Weight for the instance group, which is used to determine the - // fraction of total workers in the cluster from this instance group. 
- // For example, if primary workers have weight 2, and secondary workers have - // weight 1, the cluster will have approximately 2 primary workers for each - // secondary worker. - // - // The cluster may not reach the specified balance if constrained - // by min/max bounds or other autoscaling settings. For example, if - // `max_instances` for secondary workers is 0, then only primary workers will - // be added. The cluster can also be out of balance when created. - // - // If weight is not set on any instance group, the cluster will default to - // equal weight for all groups: the cluster will attempt to maintain an equal - // number of workers in each group within the configured size bounds for each - // group. If weight is set for one group only, the cluster will default to - // zero weight on the unset group. For example if weight is set only on - // primary workers, the cluster will use primary workers only and no - // secondary workers. - int32 weight = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to create an autoscaling policy. -message CreateAutoscalingPolicyRequest { - // Required. The "resource name" of the region or location, as described - // in https://cloud.google.com/apis/design/resource_names. - // - // * For `projects.regions.autoscalingPolicies.create`, the resource name - // of the region has the following format: - // `projects/{project_id}/regions/{region}` - // - // * For `projects.locations.autoscalingPolicies.create`, the resource name - // of the location has the following format: - // `projects/{project_id}/locations/{location}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "dataproc.googleapis.com/AutoscalingPolicy" - } - ]; - - // Required. The autoscaling policy to create. - AutoscalingPolicy policy = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// A request to fetch an autoscaling policy. -message GetAutoscalingPolicyRequest { - // Required. 
The "resource name" of the autoscaling policy, as described - // in https://cloud.google.com/apis/design/resource_names. - // - // * For `projects.regions.autoscalingPolicies.get`, the resource name - // of the policy has the following format: - // `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` - // - // * For `projects.locations.autoscalingPolicies.get`, the resource name - // of the policy has the following format: - // `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "dataproc.googleapis.com/AutoscalingPolicy" - } - ]; -} - -// A request to update an autoscaling policy. -message UpdateAutoscalingPolicyRequest { - // Required. The updated autoscaling policy. - AutoscalingPolicy policy = 1 [(google.api.field_behavior) = REQUIRED]; -} - -// A request to delete an autoscaling policy. -// -// Autoscaling policies in use by one or more clusters will not be deleted. -message DeleteAutoscalingPolicyRequest { - // Required. The "resource name" of the autoscaling policy, as described - // in https://cloud.google.com/apis/design/resource_names. - // - // * For `projects.regions.autoscalingPolicies.delete`, the resource name - // of the policy has the following format: - // `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` - // - // * For `projects.locations.autoscalingPolicies.delete`, the resource name - // of the policy has the following format: - // `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "dataproc.googleapis.com/AutoscalingPolicy" - } - ]; -} - -// A request to list autoscaling policies in a project. -message ListAutoscalingPoliciesRequest { - // Required. 
The "resource name" of the region or location, as described - // in https://cloud.google.com/apis/design/resource_names. - // - // * For `projects.regions.autoscalingPolicies.list`, the resource name - // of the region has the following format: - // `projects/{project_id}/regions/{region}` - // - // * For `projects.locations.autoscalingPolicies.list`, the resource name - // of the location has the following format: - // `projects/{project_id}/locations/{location}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "dataproc.googleapis.com/AutoscalingPolicy" - } - ]; - - // Optional. The maximum number of results to return in each response. - // Must be less than or equal to 1000. Defaults to 100. - int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The page token, returned by a previous call, to request the - // next page of results. - string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// A response to a request to list autoscaling policies in a project. -message ListAutoscalingPoliciesResponse { - // Output only. Autoscaling policies list. - repeated AutoscalingPolicy policies = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. This token is included in the response if there are more - // results to fetch. - string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/batches.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/batches.proto deleted file mode 100644 index eafb4e354ea..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/batches.proto +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.dataproc.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/cloud/dataproc/v1/shared.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; -option java_multiple_files = true; -option java_outer_classname = "BatchesProto"; -option java_package = "com.google.cloud.dataproc.v1"; - -// The BatchController provides methods to manage batch workloads. -service BatchController { - option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; - - // Creates a batch workload that executes asynchronously. - rpc CreateBatch(CreateBatchRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/locations/*}/batches" - body: "batch" - }; - option (google.api.method_signature) = "parent,batch,batch_id"; - option (google.longrunning.operation_info) = { - response_type: "Batch" - metadata_type: "google.cloud.dataproc.v1.BatchOperationMetadata" - }; - } - - // Gets the batch workload resource representation. 
- rpc GetBatch(GetBatchRequest) returns (Batch) { - option (google.api.http) = { - get: "/v1/{name=projects/*/locations/*/batches/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Lists batch workloads. - rpc ListBatches(ListBatchesRequest) returns (ListBatchesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/locations/*}/batches" - }; - option (google.api.method_signature) = "parent"; - } - - // Deletes the batch workload resource. If the batch is not in terminal state, - // the delete fails and the response returns `FAILED_PRECONDITION`. - rpc DeleteBatch(DeleteBatchRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/locations/*/batches/*}" - }; - option (google.api.method_signature) = "name"; - } -} - -// A request to create a batch workload. -message CreateBatchRequest { - // Required. The parent resource where this batch will be created. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "dataproc.googleapis.com/Batch" - } - ]; - - // Required. The batch to create. - Batch batch = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The ID to use for the batch, which will become the final component of - // the batch's resource name. - // - // This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`. - string batch_id = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A unique ID used to identify the request. If the service - // receives two - // [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s - // with the same request_id, the second request is ignored and the - // Operation that corresponds to the first Batch created and stored - // in the backend is returned. 
- // - // Recommendation: Set this value to a - // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - // - // The value must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). The maximum length is 40 characters. - string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to get the resource representation for a batch workload. -message GetBatchRequest { - // Required. The name of the batch to retrieve. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "dataproc.googleapis.com/Batch" - } - ]; -} - -// A request to list batch workloads in a project. -message ListBatchesRequest { - // Required. The parent, which owns this collection of batches. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "dataproc.googleapis.com/Batch" - } - ]; - - // Optional. The maximum number of batches to return in each response. - // The service may return fewer than this value. - // The default page size is 20; the maximum page size is 1000. - int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A page token received from a previous `ListBatches` call. - // Provide this token to retrieve the subsequent page. - string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// A list of batch workloads. -message ListBatchesResponse { - // The batches from the specified collection. - repeated Batch batches = 1; - - // A token, which can be sent as `page_token` to retrieve the next page. - // If this field is omitted, there are no subsequent pages. - string next_page_token = 2; -} - -// A request to delete a batch workload. -message DeleteBatchRequest { - // Required. The name of the batch resource to delete. 
- string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "dataproc.googleapis.com/Batch" - } - ]; -} - -// A representation of a batch workload in the service. -message Batch { - option (google.api.resource) = { - type: "dataproc.googleapis.com/Batch" - pattern: "projects/{project}/locations/{location}/batches/{batch}" - }; - - // Historical state information. - message StateHistory { - // Output only. The state of the batch at this point in history. - State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Details about the state at this point in history. - string state_message = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The time when the batch entered the historical state. - google.protobuf.Timestamp state_start_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - } - - // The batch state. - enum State { - // The batch state is unknown. - STATE_UNSPECIFIED = 0; - - // The batch is created before running. - PENDING = 1; - - // The batch is running. - RUNNING = 2; - - // The batch is cancelling. - CANCELLING = 3; - - // The batch cancellation was successful. - CANCELLED = 4; - - // The batch completed successfully. - SUCCEEDED = 5; - - // The batch is no longer running due to an error. - FAILED = 6; - } - - // Output only. The resource name of the batch. - string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A batch UUID (Unique Universal Identifier). The service - // generates this value when it creates the batch. - string uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The time when the batch was created. - google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // The application/framework-specific portion of the batch configuration. - oneof batch_config { - // Optional. PySpark batch config. 
- PySparkBatch pyspark_batch = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Spark batch config. - SparkBatch spark_batch = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. SparkR batch config. - SparkRBatch spark_r_batch = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. SparkSql batch config. - SparkSqlBatch spark_sql_batch = 7 [(google.api.field_behavior) = OPTIONAL]; - } - - // Output only. Runtime information about batch execution. - RuntimeInfo runtime_info = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The state of the batch. - State state = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Batch state details, such as a failure - // description if the state is `FAILED`. - string state_message = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The time when the batch entered a current state. - google.protobuf.Timestamp state_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The email address of the user who created the batch. - string creator = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. The labels to associate with this batch. - // Label **keys** must contain 1 to 63 characters, and must conform to - // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - // Label **values** may be empty, but, if present, must contain 1 to 63 - // characters, and must conform to [RFC - // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be - // associated with a batch. - map labels = 13 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Runtime configuration for the batch execution. - RuntimeConfig runtime_config = 14 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Environment configuration for the batch execution. - EnvironmentConfig environment_config = 15 [(google.api.field_behavior) = OPTIONAL]; - - // Output only. The resource name of the operation associated with this batch. 
- string operation = 16 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Historical state information for the batch. - repeated StateHistory state_history = 17 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// A configuration for running an -// [Apache -// PySpark](https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) -// batch workload. -message PySparkBatch { - // Required. The HCFS URI of the main Python file to use as the Spark driver. Must - // be a .py file. - string main_python_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The arguments to pass to the driver. Do not include arguments - // that can be set as batch properties, such as `--conf`, since a collision - // can occur that causes an incorrect batch submission. - repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS file URIs of Python files to pass to the PySpark - // framework. Supported file types: `.py`, `.egg`, and `.zip`. - repeated string python_file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of jar files to add to the classpath of the - // Spark driver and tasks. - repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of files to be placed in the working directory of - // each executor. - repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of archives to be extracted into the working directory - // of each executor. Supported file types: - // `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`. - repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; -} - -// A configuration for running an [Apache Spark](http://spark.apache.org/) -// batch workload. -message SparkBatch { - // The specification of the main method to call to drive the Spark - // workload. Specify either the jar file that contains the main class or the - // main class name. 
To pass both a main jar and a main class in that jar, add - // the jar to `jar_file_uris`, and then specify the main class - // name in `main_class`. - oneof driver { - // Optional. The HCFS URI of the jar file that contains the main class. - string main_jar_file_uri = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The name of the driver main class. The jar file that contains the class - // must be in the classpath or specified in `jar_file_uris`. - string main_class = 2 [(google.api.field_behavior) = OPTIONAL]; - } - - // Optional. The arguments to pass to the driver. Do not include arguments - // that can be set as batch properties, such as `--conf`, since a collision - // can occur that causes an incorrect batch submission. - repeated string args = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of jar files to add to the classpath of the - // Spark driver and tasks. - repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of files to be placed in the working directory of - // each executor. - repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of archives to be extracted into the working directory - // of each executor. Supported file types: - // `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`. - repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; -} - -// A configuration for running an -// [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html) -// batch workload. -message SparkRBatch { - // Required. The HCFS URI of the main R file to use as the driver. - // Must be a `.R` or `.r` file. - string main_r_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The arguments to pass to the Spark driver. Do not include arguments - // that can be set as batch properties, such as `--conf`, since a collision - // can occur that causes an incorrect batch submission. 
- repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of files to be placed in the working directory of - // each executor. - repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of archives to be extracted into the working directory - // of each executor. Supported file types: - // `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`. - repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// A configuration for running -// [Apache Spark SQL](http://spark.apache.org/sql/) queries as a batch workload. -message SparkSqlBatch { - // Required. The HCFS URI of the script that contains Spark SQL queries to execute. - string query_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Mapping of query variable names to values (equivalent to the - // Spark SQL command: `SET name="value";`). - map query_variables = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. - repeated string jar_file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/clusters.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/clusters.proto deleted file mode 100644 index 11611fbf680..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/clusters.proto +++ /dev/null @@ -1,1473 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.dataproc.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/cloud/dataproc/v1/shared.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; -option java_multiple_files = true; -option java_outer_classname = "ClustersProto"; -option java_package = "com.google.cloud.dataproc.v1"; -option (google.api.resource_definition) = { - type: "container.googleapis.com/Cluster" - pattern: "projects/{project}/locations/{location}/clusters/{cluster}" -}; -option (google.api.resource_definition) = { - type: "metastore.googleapis.com/Service" - pattern: "projects/{project}/locations/{location}/services/{service}" -}; - -// The ClusterControllerService provides methods to manage clusters -// of Compute Engine instances. -service ClusterController { - option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; - - // Creates a cluster in a project. The returned - // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). 
- rpc CreateCluster(CreateClusterRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/projects/{project_id}/regions/{region}/clusters" - body: "cluster" - }; - option (google.api.method_signature) = "project_id,region,cluster"; - option (google.longrunning.operation_info) = { - response_type: "Cluster" - metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" - }; - } - - // Updates a cluster in a project. The returned - // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - // The cluster must be in a - // [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error - // is returned. - rpc UpdateCluster(UpdateClusterRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" - body: "cluster" - }; - option (google.api.method_signature) = - "project_id,region,cluster_name,cluster,update_mask"; - option (google.longrunning.operation_info) = { - response_type: "Cluster" - metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" - }; - } - - // Stops a cluster in a project. - rpc StopCluster(StopClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:stop" - body: "*" - }; - option (google.longrunning.operation_info) = { - response_type: "Cluster" - metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" - }; - } - - // Starts a cluster in a project. 
- rpc StartCluster(StartClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:start" - body: "*" - }; - option (google.longrunning.operation_info) = { - response_type: "Cluster" - metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" - }; - } - - // Deletes a cluster in a project. The returned - // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - rpc DeleteCluster(DeleteClusterRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - delete: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" - }; - option (google.api.method_signature) = "project_id,region,cluster_name"; - option (google.longrunning.operation_info) = { - response_type: "google.protobuf.Empty" - metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" - }; - } - - // Gets the resource representation for a cluster in a project. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { - get: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" - }; - option (google.api.method_signature) = "project_id,region,cluster_name"; - } - - // Lists all regions/{region}/clusters in a project alphabetically. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { - get: "/v1/projects/{project_id}/regions/{region}/clusters" - }; - option (google.api.method_signature) = "project_id,region"; - option (google.api.method_signature) = "project_id,region,filter"; - } - - // Gets cluster diagnostic information. 
The returned - // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - // After the operation completes, - // [Operation.response][google.longrunning.Operation.response] - // contains - // [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). - rpc DiagnoseCluster(DiagnoseClusterRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose" - body: "*" - }; - option (google.api.method_signature) = "project_id,region,cluster_name"; - option (google.longrunning.operation_info) = { - response_type: "DiagnoseClusterResults" - metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" - }; - } -} - -// Describes the identifying information, config, and status of -// a Dataproc cluster -message Cluster { - // Required. The Google Cloud Platform project ID that the cluster belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The cluster name, which must be unique within a project. - // The name must start with a lowercase letter, and can contain - // up to 51 lowercase letters, numbers, and hyphens. It cannot end - // with a hyphen. The name of a deleted cluster can be reused. - string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The cluster config for a cluster of Compute Engine Instances. - // Note that Dataproc may set default values, and values may change - // when clusters are updated. - ClusterConfig config = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. 
The virtual cluster config is used when creating a Dataproc - // cluster that does not directly control the underlying compute resources, - // for example, when creating a [Dataproc-on-GKE - // cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke). - // Dataproc may set default values, and values may change when - // clusters are updated. Exactly one of - // [config][google.cloud.dataproc.v1.Cluster.config] or - // [virtual_cluster_config][google.cloud.dataproc.v1.Cluster.virtual_cluster_config] - // must be specified. - VirtualClusterConfig virtual_cluster_config = 10 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The labels to associate with this cluster. - // Label **keys** must contain 1 to 63 characters, and must conform to - // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - // Label **values** may be empty, but, if present, must contain 1 to 63 - // characters, and must conform to [RFC - // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be - // associated with a cluster. - map labels = 8 [(google.api.field_behavior) = OPTIONAL]; - - // Output only. Cluster status. - ClusterStatus status = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The previous cluster status. - repeated ClusterStatus status_history = 7 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A cluster UUID (Unique Universal Identifier). Dataproc - // generates this value when it creates the cluster. - string cluster_uuid = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Contains cluster daemon metrics such as HDFS and YARN stats. - // - // **Beta Feature**: This report is available for testing purposes only. It - // may be changed before final release. - ClusterMetrics metrics = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// The cluster config. -message ClusterConfig { - // Optional. 
A Cloud Storage bucket used to stage job - // dependencies, config files, and job driver console output. - // If you do not specify a staging bucket, Cloud - // Dataproc will determine a Cloud Storage location (US, - // ASIA, or EU) for your cluster's staging bucket according to the - // Compute Engine zone where your cluster is deployed, and then create - // and manage this project-level, per-location bucket (see - // [Dataproc staging and temp - // buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). - // **This field requires a Cloud Storage bucket name, not a `gs://...` URI to - // a Cloud Storage bucket.** - string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs - // data, such as Spark and MapReduce history files. If you do not specify a - // temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or - // EU) for your cluster's temp bucket according to the Compute Engine zone - // where your cluster is deployed, and then create and manage this - // project-level, per-location bucket. The default bucket has a TTL of 90 - // days, but you can use any TTL (or none) if you specify a bucket (see - // [Dataproc staging and temp - // buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). - // **This field requires a Cloud Storage bucket name, not a `gs://...` URI to - // a Cloud Storage bucket.** - string temp_bucket = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The shared Compute Engine config settings for - // all instances in a cluster. - GceClusterConfig gce_cluster_config = 8 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Compute Engine config settings for - // the cluster's master instance. - InstanceGroupConfig master_config = 9 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. 
The Compute Engine config settings for - // the cluster's worker instances. - InstanceGroupConfig worker_config = 10 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Compute Engine config settings for - // a cluster's secondary worker instances - InstanceGroupConfig secondary_worker_config = 12 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The config settings for cluster software. - SoftwareConfig software_config = 13 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Commands to execute on each node after config is - // completed. By default, executables are run on master and all worker nodes. - // You can test a node's `role` metadata to run an executable on - // a master or worker node, as shown below using `curl` (you can also use - // `wget`): - // - // ROLE=$(curl -H Metadata-Flavor:Google - // http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) - // if [[ "${ROLE}" == 'Master' ]]; then - // ... master specific actions ... - // else - // ... worker specific actions ... - // fi - repeated NodeInitializationAction initialization_actions = 11 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Encryption settings for the cluster. - EncryptionConfig encryption_config = 15 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Autoscaling config for the policy associated with the cluster. - // Cluster does not autoscale if this field is unset. - AutoscalingConfig autoscaling_config = 18 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Security settings for the cluster. - SecurityConfig security_config = 16 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Lifecycle setting for the cluster. - LifecycleConfig lifecycle_config = 17 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Port/endpoint configuration for this cluster - EndpointConfig endpoint_config = 19 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Metastore configuration. 
- MetastoreConfig metastore_config = 20 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The config for Dataproc metrics. - DataprocMetricConfig dataproc_metric_config = 23 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The node group settings. - repeated AuxiliaryNodeGroup auxiliary_node_groups = 25 - [(google.api.field_behavior) = OPTIONAL]; -} - -// The Dataproc cluster config for a cluster that does not directly control the -// underlying compute resources, such as a [Dataproc-on-GKE -// cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke). -message VirtualClusterConfig { - // Optional. A Cloud Storage bucket used to stage job - // dependencies, config files, and job driver console output. - // If you do not specify a staging bucket, Cloud - // Dataproc will determine a Cloud Storage location (US, - // ASIA, or EU) for your cluster's staging bucket according to the - // Compute Engine zone where your cluster is deployed, and then create - // and manage this project-level, per-location bucket (see - // [Dataproc staging and temp - // buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). - // **This field requires a Cloud Storage bucket name, not a `gs://...` URI to - // a Cloud Storage bucket.** - string staging_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; - - oneof infrastructure_config { - // Required. The configuration for running the Dataproc cluster on - // Kubernetes. - KubernetesClusterConfig kubernetes_cluster_config = 6 - [(google.api.field_behavior) = REQUIRED]; - } - - // Optional. Configuration of auxiliary services used by this cluster. - AuxiliaryServicesConfig auxiliary_services_config = 7 - [(google.api.field_behavior) = OPTIONAL]; -} - -// Auxiliary services configuration for a Cluster. -message AuxiliaryServicesConfig { - // Optional. The Hive Metastore configuration for this workload. 
- MetastoreConfig metastore_config = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Spark History Server configuration for the workload. - SparkHistoryServerConfig spark_history_server_config = 2 - [(google.api.field_behavior) = OPTIONAL]; -} - -// Endpoint config for this cluster -message EndpointConfig { - // Output only. The map of port descriptions to URLs. Will only be populated - // if enable_http_port_access is true. - map http_ports = 1 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. If true, enable http access to specific ports on the cluster - // from external sources. Defaults to false. - bool enable_http_port_access = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// Autoscaling Policy config associated with the cluster. -message AutoscalingConfig { - // Optional. The autoscaling policy used by the cluster. - // - // Only resource names including projectid and location (region) are valid. - // Examples: - // - // * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` - // * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` - // - // Note that the policy must be in the same project and Dataproc region. - string policy_uri = 1 [(google.api.field_behavior) = OPTIONAL]; -} - -// Encryption settings for the cluster. -message EncryptionConfig { - // Optional. The Cloud KMS key name to use for PD disk encryption for all - // instances in the cluster. - string gce_pd_kms_key_name = 1 [(google.api.field_behavior) = OPTIONAL]; -} - -// Common config settings for resources of Compute Engine cluster -// instances, applicable to all instances in the cluster. -message GceClusterConfig { - // `PrivateIpv6GoogleAccess` controls whether and how Dataproc cluster nodes - // can communicate with Google Services through gRPC over IPv6. 
- // These values are directly mapped to corresponding values in the - // [Compute Engine Instance - // fields](https://cloud.google.com/compute/docs/reference/rest/v1/instances). - enum PrivateIpv6GoogleAccess { - // If unspecified, Compute Engine default behavior will apply, which - // is the same as - // [INHERIT_FROM_SUBNETWORK][google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.INHERIT_FROM_SUBNETWORK]. - PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED = 0; - - // Private access to and from Google Services configuration - // inherited from the subnetwork configuration. This is the - // default Compute Engine behavior. - INHERIT_FROM_SUBNETWORK = 1; - - // Enables outbound private IPv6 access to Google Services from the Dataproc - // cluster. - OUTBOUND = 2; - - // Enables bidirectional private IPv6 access between Google Services and the - // Dataproc cluster. - BIDIRECTIONAL = 3; - } - - // Optional. The zone where the Compute Engine cluster will be located. - // On a create request, it is required in the "global" region. If omitted - // in a non-global Dataproc region, the service will pick a zone in the - // corresponding Compute Engine region. On a get request, zone will - // always be present. - // - // A full URL, partial URI, or short name are valid. Examples: - // - // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` - // * `projects/[project_id]/zones/[zone]` - // * `us-central1-f` - string zone_uri = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Compute Engine network to be used for machine - // communications. Cannot be specified with subnetwork_uri. If neither - // `network_uri` nor `subnetwork_uri` is specified, the "default" network of - // the project is used, if it exists. Cannot be a "Custom Subnet Network" (see - // [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for - // more information). - // - // A full URL, partial URI, or short name are valid. 
Examples: - // - // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` - // * `projects/[project_id]/regions/global/default` - // * `default` - string network_uri = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Compute Engine subnetwork to be used for machine - // communications. Cannot be specified with network_uri. - // - // A full URL, partial URI, or short name are valid. Examples: - // - // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` - // * `projects/[project_id]/regions/us-east1/subnetworks/sub0` - // * `sub0` - string subnetwork_uri = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If true, all instances in the cluster will only have internal IP - // addresses. By default, clusters are not restricted to internal IP - // addresses, and will have ephemeral external IP addresses assigned to each - // instance. This `internal_ip_only` restriction can only be enabled for - // subnetwork enabled networks, and all off-cluster dependencies must be - // configured to be accessible without external IP addresses. - bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The type of IPv6 access for a cluster. - PrivateIpv6GoogleAccess private_ipv6_google_access = 12 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The [Dataproc service - // account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) - // (also see [VM Data Plane - // identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) - // used by Dataproc cluster VM instances to access Google Cloud Platform - // services. - // - // If not specified, the - // [Compute Engine default service - // account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) - // is used. 
- string service_account = 8 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The URIs of service account scopes to be included in - // Compute Engine instances. The following base set of scopes is always - // included: - // - // * https://www.googleapis.com/auth/cloud.useraccounts.readonly - // * https://www.googleapis.com/auth/devstorage.read_write - // * https://www.googleapis.com/auth/logging.write - // - // If no scopes are specified, the following defaults are also provided: - // - // * https://www.googleapis.com/auth/bigquery - // * https://www.googleapis.com/auth/bigtable.admin.table - // * https://www.googleapis.com/auth/bigtable.data - // * https://www.googleapis.com/auth/devstorage.full_control - repeated string service_account_scopes = 3 - [(google.api.field_behavior) = OPTIONAL]; - - // The Compute Engine tags to add to all instances (see [Tagging - // instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). - repeated string tags = 4; - - // The Compute Engine metadata entries to add to all instances (see - // [Project and instance - // metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). - map metadata = 5; - - // Optional. Reservation Affinity for consuming Zonal reservation. - ReservationAffinity reservation_affinity = 11 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Node Group Affinity for sole-tenant clusters. - NodeGroupAffinity node_group_affinity = 13 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Shielded Instance Config for clusters using [Compute Engine - // Shielded - // VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm). - ShieldedInstanceConfig shielded_instance_config = 14 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Confidential Instance Config for clusters using [Confidential - // VMs](https://cloud.google.com/compute/confidential-vm/docs). 
- ConfidentialInstanceConfig confidential_instance_config = 15 - [(google.api.field_behavior) = OPTIONAL]; -} - -// Node Group Affinity for clusters using sole-tenant node groups. -// **The Dataproc `NodeGroupAffinity` resource is not related to the -// Dataproc [NodeGroup][google.cloud.dataproc.v1.NodeGroup] resource.** -message NodeGroupAffinity { - // Required. The URI of a - // sole-tenant [node group - // resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) - // that the cluster will be created on. - // - // A full URL, partial URI, or node group name are valid. Examples: - // - // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` - // * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` - // * `node-group-1` - string node_group_uri = 1 [(google.api.field_behavior) = REQUIRED]; -} - -// Shielded Instance Config for clusters using [Compute Engine Shielded -// VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm). -message ShieldedInstanceConfig { - // Optional. Defines whether instances have Secure Boot enabled. - bool enable_secure_boot = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Defines whether instances have the vTPM enabled. - bool enable_vtpm = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Defines whether instances have integrity monitoring enabled. - bool enable_integrity_monitoring = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// Confidential Instance Config for clusters using [Confidential -// VMs](https://cloud.google.com/compute/confidential-vm/docs) -message ConfidentialInstanceConfig { - // Optional. Defines whether the instance should have confidential compute - // enabled. - bool enable_confidential_compute = 1 [(google.api.field_behavior) = OPTIONAL]; -} - -// The config settings for Compute Engine resources in -// an instance group, such as a master or worker group. 
-message InstanceGroupConfig { - // Controls the use of preemptible instances within the group. - enum Preemptibility { - // Preemptibility is unspecified, the system will choose the - // appropriate setting for each instance group. - PREEMPTIBILITY_UNSPECIFIED = 0; - - // Instances are non-preemptible. - // - // This option is allowed for all instance groups and is the only valid - // value for Master and Worker instance groups. - NON_PREEMPTIBLE = 1; - - // Instances are [preemptible] - // (https://cloud.google.com/compute/docs/instances/preemptible). - // - // This option is allowed only for [secondary worker] - // (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) - // groups. - PREEMPTIBLE = 2; - } - - // Optional. The number of VM instances in the instance group. - // For [HA - // cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) - // [master_config](#FIELDS.master_config) groups, **must be set to 3**. - // For standard cluster [master_config](#FIELDS.master_config) groups, - // **must be set to 1**. - int32 num_instances = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Output only. The list of instance names. Dataproc derives the names - // from `cluster_name`, `num_instances`, and the instance group. - repeated string instance_names = 2 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. The Compute Engine image resource used for cluster instances. - // - // The URI can represent an image or image family. - // - // Image examples: - // - // * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` - // * `projects/[project_id]/global/images/[image-id]` - // * `image-id` - // - // Image family examples. 
Dataproc will use the most recent - // image from the family: - // - // * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` - // * `projects/[project_id]/global/images/family/[custom-image-family-name]` - // - // If the URI is unspecified, it will be inferred from - // `SoftwareConfig.image_version` or the system default. - string image_uri = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Compute Engine machine type used for cluster instances. - // - // A full URL, partial URI, or short name are valid. Examples: - // - // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` - // * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` - // * `n1-standard-2` - // - // **Auto Zone Exception**: If you are using the Dataproc - // [Auto Zone - // Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) - // feature, you must use the short name of the machine type - // resource, for example, `n1-standard-2`. - string machine_type_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Disk option config settings. - DiskConfig disk_config = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Output only. Specifies that this instance group contains preemptible - // instances. - bool is_preemptible = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. Specifies the preemptibility of the instance group. - // - // The default value for master and worker groups is - // `NON_PREEMPTIBLE`. This default cannot be changed. - // - // The default value for secondary instances is - // `PREEMPTIBLE`. - Preemptibility preemptibility = 10 [(google.api.field_behavior) = OPTIONAL]; - - // Output only. The config for Compute Engine Instance Group - // Manager that manages this group. - // This is only used for preemptible instance groups. 
- ManagedGroupConfig managed_group_config = 7 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. The Compute Engine accelerator configuration for these - // instances. - repeated AcceleratorConfig accelerators = 8 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Specifies the minimum cpu platform for the Instance Group. - // See [Dataproc -> Minimum CPU - // Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). - string min_cpu_platform = 9 [(google.api.field_behavior) = OPTIONAL]; -} - -// Specifies the resources used to actively manage an instance group. -message ManagedGroupConfig { - // Output only. The name of the Instance Template used for the Managed - // Instance Group. - string instance_template_name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The name of the Instance Group Manager for this group. - string instance_group_manager_name = 2 - [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Specifies the type and number of accelerator cards attached to the instances -// of an instance. See [GPUs on Compute -// Engine](https://cloud.google.com/compute/docs/gpus/). -message AcceleratorConfig { - // Full URL, partial URI, or short name of the accelerator type resource to - // expose to this instance. See - // [Compute Engine - // AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). 
- // - // Examples: - // - // * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` - // * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` - // * `nvidia-tesla-k80` - // - // **Auto Zone Exception**: If you are using the Dataproc - // [Auto Zone - // Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) - // feature, you must use the short name of the accelerator type - // resource, for example, `nvidia-tesla-k80`. - string accelerator_type_uri = 1; - - // The number of the accelerator cards of this type exposed to this instance. - int32 accelerator_count = 2; -} - -// Specifies the config of disk options for a group of VM instances. -message DiskConfig { - // Optional. Type of the boot disk (default is "pd-standard"). - // Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), - // "pd-ssd" (Persistent Disk Solid State Drive), - // or "pd-standard" (Persistent Disk Hard Disk Drive). - // See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types). - string boot_disk_type = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Size in GB of the boot disk (default is 500GB). - int32 boot_disk_size_gb = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Number of attached SSDs, from 0 to 8 (default is 0). - // If SSDs are not attached, the boot disk is used to store runtime logs and - // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. - // If one or more SSDs are attached, this runtime bulk - // data is spread across them, and the boot disk contains only basic - // config and installed binaries. - int32 num_local_ssds = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Interface type of local SSDs (default is "scsi"). - // Valid values: "scsi" (Small Computer System Interface), - // "nvme" (Non-Volatile Memory Express). 
- // See [local SSD - // performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance). - string local_ssd_interface = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// Node group identification and configuration information. -message AuxiliaryNodeGroup { - // Required. Node group configuration. - NodeGroup node_group = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. A node group ID. Generated if not specified. - // - // The ID must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). Cannot begin or end with underscore - // or hyphen. Must consist of from 3 to 33 characters. - string node_group_id = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// Dataproc Node Group. -// **The Dataproc `NodeGroup` resource is not related to the -// Dataproc [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] -// resource.** -message NodeGroup { - option (google.api.resource) = { - type: "dataproc.googleapis.com/NodeGroup" - pattern: "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}" - }; - - // Node group roles. - enum Role { - // Required unspecified role. - ROLE_UNSPECIFIED = 0; - - // Job drivers run on the node group. - DRIVER = 1; - } - - // The Node group [resource name](https://aip.dev/122). - string name = 1; - - // Required. Node group roles. - repeated Role roles = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The node group instance group configuration. - InstanceGroupConfig node_group_config = 3 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Node group labels. - // - // * Label **keys** must consist of from 1 to 63 characters and conform to - // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - // * Label **values** can be empty. If specified, they must consist of from - // 1 to 63 characters and conform to [RFC 1035] - // (https://www.ietf.org/rfc/rfc1035.txt). - // * The node group must have no more than 32 labels. 
- map labels = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// Specifies an executable to run on a fully configured node and a -// timeout period for executable completion. -message NodeInitializationAction { - // Required. Cloud Storage URI of executable file. - string executable_file = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Amount of time executable has to complete. Default is - // 10 minutes (see JSON representation of - // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). - // - // Cluster creation fails with an explanatory error message (the - // name of the executable that caused the error and the exceeded timeout - // period) if the executable is not completed at end of the timeout period. - google.protobuf.Duration execution_timeout = 2 - [(google.api.field_behavior) = OPTIONAL]; -} - -// The status of a cluster and its instances. -message ClusterStatus { - // The cluster state. - enum State { - // The cluster state is unknown. - UNKNOWN = 0; - - // The cluster is being created and set up. It is not ready for use. - CREATING = 1; - - // The cluster is currently running and healthy. It is ready for use. - // - // **Note:** The cluster state changes from "creating" to "running" status - // after the master node(s), first two primary worker nodes (and the last - // primary worker node if primary workers > 2) are running. - RUNNING = 2; - - // The cluster encountered an error. It is not ready for use. - ERROR = 3; - - // The cluster has encountered an error while being updated. Jobs can - // be submitted to the cluster, but the cluster cannot be updated. - ERROR_DUE_TO_UPDATE = 9; - - // The cluster is being deleted. It cannot be used. - DELETING = 4; - - // The cluster is being updated. It continues to accept and process jobs. - UPDATING = 5; - - // The cluster is being stopped. It cannot be used. - STOPPING = 6; - - // The cluster is currently stopped. It is not ready for use. 
- STOPPED = 7; - - // The cluster is being started. It is not ready for use. - STARTING = 8; - } - - // The cluster substate. - enum Substate { - // The cluster substate is unknown. - UNSPECIFIED = 0; - - // The cluster is known to be in an unhealthy state - // (for example, critical daemons are not running or HDFS capacity is - // exhausted). - // - // Applies to RUNNING state. - UNHEALTHY = 1; - - // The agent-reported status is out of date (may occur if - // Dataproc loses communication with Agent). - // - // Applies to RUNNING state. - STALE_STATUS = 2; - } - - // Output only. The cluster's state. - State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. Output only. Details of cluster's state. - string detail = 2 [ - (google.api.field_behavior) = OUTPUT_ONLY, - (google.api.field_behavior) = OPTIONAL - ]; - - // Output only. Time when this state was entered (see JSON representation of - // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Timestamp state_start_time = 3 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Additional state information that includes - // status reported by the agent. - Substate substate = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Security related configuration, including encryption, Kerberos, etc. -message SecurityConfig { - // Optional. Kerberos related configuration. - KerberosConfig kerberos_config = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Identity related configuration, including service account based - // secure multi-tenancy user mappings. - IdentityConfig identity_config = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// Specifies Kerberos related configuration. -message KerberosConfig { - // Optional. Flag to indicate whether to Kerberize the cluster (default: - // false). Set this field to true to enable Kerberos on a cluster. 
- bool enable_kerberos = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Cloud Storage URI of a KMS encrypted file containing the root - // principal password. - string root_principal_password_uri = 2 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The uri of the KMS key used to encrypt various sensitive - // files. - string kms_key_uri = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Cloud Storage URI of the keystore file used for SSL - // encryption. If not provided, Dataproc will provide a self-signed - // certificate. - string keystore_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Cloud Storage URI of the truststore file used for SSL - // encryption. If not provided, Dataproc will provide a self-signed - // certificate. - string truststore_uri = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Cloud Storage URI of a KMS encrypted file containing the - // password to the user provided keystore. For the self-signed certificate, - // this password is generated by Dataproc. - string keystore_password_uri = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Cloud Storage URI of a KMS encrypted file containing the - // password to the user provided key. For the self-signed certificate, this - // password is generated by Dataproc. - string key_password_uri = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Cloud Storage URI of a KMS encrypted file containing the - // password to the user provided truststore. For the self-signed certificate, - // this password is generated by Dataproc. - string truststore_password_uri = 8 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The remote realm the Dataproc on-cluster KDC will trust, should - // the user enable cross realm trust. - string cross_realm_trust_realm = 9 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. 
The KDC (IP or hostname) for the remote trusted realm in a cross - // realm trust relationship. - string cross_realm_trust_kdc = 10 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The admin server (IP or hostname) for the remote trusted realm in - // a cross realm trust relationship. - string cross_realm_trust_admin_server = 11 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Cloud Storage URI of a KMS encrypted file containing the - // shared password between the on-cluster Kerberos realm and the remote - // trusted realm, in a cross realm trust relationship. - string cross_realm_trust_shared_password_uri = 12 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Cloud Storage URI of a KMS encrypted file containing the - // master key of the KDC database. - string kdc_db_key_uri = 13 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The lifetime of the ticket granting ticket, in hours. - // If not specified, or user specifies 0, then default value 10 - // will be used. - int32 tgt_lifetime_hours = 14 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The name of the on-cluster Kerberos realm. - // If not specified, the uppercased domain of hostnames will be the realm. - string realm = 15 [(google.api.field_behavior) = OPTIONAL]; -} - -// Identity related configuration, including service account based -// secure multi-tenancy user mappings. -message IdentityConfig { - // Required. Map of user to service account. - map user_service_account_mapping = 1 - [(google.api.field_behavior) = REQUIRED]; -} - -// Specifies the selection and config of software inside the cluster. -message SoftwareConfig { - // Optional. The version of software inside the cluster. 
It must be one of the - // supported [Dataproc - // Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), - // such as "1.2" (including a subminor version, such as "1.2.29"), or the - // ["preview" - // version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). - // If unspecified, it defaults to the latest Debian version. - string image_version = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The properties to set on daemon config files. - // - // Property keys are specified in `prefix:property` format, for example - // `core:hadoop.tmp.dir`. The following are supported prefixes - // and their mappings: - // - // * capacity-scheduler: `capacity-scheduler.xml` - // * core: `core-site.xml` - // * distcp: `distcp-default.xml` - // * hdfs: `hdfs-site.xml` - // * hive: `hive-site.xml` - // * mapred: `mapred-site.xml` - // * pig: `pig.properties` - // * spark: `spark-defaults.conf` - // * yarn: `yarn-site.xml` - // - // For more information, see [Cluster - // properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties). - map properties = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The set of components to activate on the cluster. - repeated Component optional_components = 3 - [(google.api.field_behavior) = OPTIONAL]; -} - -// Specifies the cluster auto-delete schedule configuration. -message LifecycleConfig { - // Optional. The duration to keep the cluster alive while idling (when no jobs - // are running). Passing this threshold will cause the cluster to be - // deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON - // representation of - // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). 
- google.protobuf.Duration idle_delete_ttl = 1 - [(google.api.field_behavior) = OPTIONAL]; - - // Either the exact time the cluster should be deleted at or - // the cluster maximum age. - oneof ttl { - // Optional. The time when cluster will be auto-deleted (see JSON - // representation of - // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Timestamp auto_delete_time = 2 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The lifetime duration of cluster. The cluster will be - // auto-deleted at the end of this period. Minimum value is 10 minutes; - // maximum value is 14 days (see JSON representation of - // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Duration auto_delete_ttl = 3 - [(google.api.field_behavior) = OPTIONAL]; - } - - // Output only. The time when cluster became idle (most recent job finished) - // and became eligible for deletion due to idleness (see JSON representation - // of - // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Timestamp idle_start_time = 4 - [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Specifies a Metastore configuration. -message MetastoreConfig { - // Required. Resource name of an existing Dataproc Metastore service. - // - // Example: - // - // * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]` - string dataproc_metastore_service = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "metastore.googleapis.com/Service" - } - ]; -} - -// Dataproc metric config. -message DataprocMetricConfig { - // A source for the collection of Dataproc OSS metrics (see [available OSS - // metrics] - // (https://cloud.google.com//dataproc/docs/guides/monitoring#available_oss_metrics)). - enum MetricSource { - // Required unspecified metric source. 
- METRIC_SOURCE_UNSPECIFIED = 0; - - // Default monitoring agent metrics. If this source is enabled, - // Dataproc enables the monitoring agent in Compute Engine, - // and collects default monitoring agent metrics, which are published - // with an `agent.googleapis.com` prefix. - MONITORING_AGENT_DEFAULTS = 1; - - // HDFS metric source. - HDFS = 2; - - // Spark metric source. - SPARK = 3; - - // YARN metric source. - YARN = 4; - - // Spark History Server metric source. - SPARK_HISTORY_SERVER = 5; - - // Hiveserver2 metric source. - HIVESERVER2 = 6; - } - - // A Dataproc OSS metric. - message Metric { - // Required. Default metrics are collected unless `metricOverrides` are - // specified for the metric source (see [Available OSS metrics] - // (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) - // for more information). - MetricSource metric_source = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Specify one or more [available OSS metrics] - // (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) - // to collect for the metric course (for the `SPARK` metric source, any - // [Spark metric] - // (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be - // specified). - // - // Provide metrics in the following format: - // METRIC_SOURCE:INSTANCE:GROUP:METRIC - // Use camelcase as appropriate. - // - // Examples: - // - // ``` - // yarn:ResourceManager:QueueMetrics:AppsCompleted - // spark:driver:DAGScheduler:job.allJobs - // sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed - // hiveserver2:JVM:Memory:NonHeapMemoryUsage.used - // ``` - // - // Notes: - // - // * Only the specified overridden metrics will be collected for the - // metric source. For example, if one or more `spark:executive` metrics - // are listed as metric overrides, other `SPARK` metrics will not be - // collected. The collection of the default metrics for other OSS metric - // sources is unaffected. 
For example, if both `SPARK` andd `YARN` metric - // sources are enabled, and overrides are provided for Spark metrics only, - // all default YARN metrics will be collected. - repeated string metric_overrides = 2 - [(google.api.field_behavior) = OPTIONAL]; - } - - // Required. Metrics sources to enable. - repeated Metric metrics = 1 [(google.api.field_behavior) = REQUIRED]; -} - -// Contains cluster daemon metrics, such as HDFS and YARN stats. -// -// **Beta Feature**: This report is available for testing purposes only. It may -// be changed before final release. -message ClusterMetrics { - // The HDFS metrics. - map hdfs_metrics = 1; - - // The YARN metrics. - map yarn_metrics = 2; -} - -// A request to create a cluster. -message CreateClusterRequest { - // Required. The ID of the Google Cloud Platform project that the cluster - // belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The cluster to create. - Cluster cluster = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. A unique ID used to identify the request. If the server receives - // two - // [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s - // with the same id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created - // and stored in the backend is returned. - // - // It is recommended to always set this value to a - // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - // - // The ID must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). The maximum length is 40 characters. - string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. 
Failure action when primary worker creation fails. - FailureAction action_on_failed_primary_workers = 5 - [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to update a cluster. -message UpdateClusterRequest { - // Required. The ID of the Google Cloud Platform project the - // cluster belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 5 [(google.api.field_behavior) = REQUIRED]; - - // Required. The cluster name. - string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The changes to the cluster. - Cluster cluster = 3 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Timeout for graceful YARN decomissioning. Graceful - // decommissioning allows removing nodes from the cluster without - // interrupting jobs in progress. Timeout specifies how long to wait for jobs - // in progress to finish before forcefully removing nodes (and potentially - // interrupting jobs). Default timeout is 0 (for forceful decommission), and - // the maximum allowed timeout is 1 day. (see JSON representation of - // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). - // - // Only supported on Dataproc image versions 1.2 and higher. - google.protobuf.Duration graceful_decommission_timeout = 6 - [(google.api.field_behavior) = OPTIONAL]; - - // Required. Specifies the path, relative to `Cluster`, of - // the field to update. 
For example, to change the number of workers - // in a cluster to 5, the `update_mask` parameter would be - // specified as `config.worker_config.num_instances`, - // and the `PATCH` request body would specify the new value, as follows: - // - // { - // "config":{ - // "workerConfig":{ - // "numInstances":"5" - // } - // } - // } - // Similarly, to change the number of preemptible workers in a cluster to 5, - // the `update_mask` parameter would be - // `config.secondary_worker_config.num_instances`, and the `PATCH` request - // body would be set as follows: - // - // { - // "config":{ - // "secondaryWorkerConfig":{ - // "numInstances":"5" - // } - // } - // } - // Note: Currently, only the following fields can be updated: - // - // - // - // - // - // - // - // - // - // - // - // - // - // - // - // - // - // - // - // - // - // - // - //
MaskPurpose
labelsUpdate labels
config.worker_config.num_instancesResize primary worker group
config.secondary_worker_config.num_instancesResize secondary worker group
config.autoscaling_config.policy_uriUse, stop using, or - // change autoscaling policies
- google.protobuf.FieldMask update_mask = 4 - [(google.api.field_behavior) = REQUIRED]; - - // Optional. A unique ID used to identify the request. If the server - // receives two - // [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s - // with the same id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created - // and stored in the backend is returned. - // - // It is recommended to always set this value to a - // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - // - // The ID must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). The maximum length is 40 characters. - string request_id = 7 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to stop a cluster. -message StopClusterRequest { - // Required. The ID of the Google Cloud Platform project the - // cluster belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The cluster name. - string cluster_name = 3 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Specifying the `cluster_uuid` means the RPC will fail - // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. - string cluster_uuid = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A unique ID used to identify the request. If the server - // receives two - // [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s - // with the same id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created - // and stored in the backend is returned. 
- // - // Recommendation: Set this value to a - // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - // - // The ID must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). The maximum length is 40 characters. - string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to start a cluster. -message StartClusterRequest { - // Required. The ID of the Google Cloud Platform project the - // cluster belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The cluster name. - string cluster_name = 3 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Specifying the `cluster_uuid` means the RPC will fail - // (with error NOT_FOUND) if a cluster with the specified UUID does not exist. - string cluster_uuid = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A unique ID used to identify the request. If the server - // receives two - // [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s - // with the same id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created - // and stored in the backend is returned. - // - // Recommendation: Set this value to a - // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - // - // The ID must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). The maximum length is 40 characters. - string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to delete a cluster. -message DeleteClusterRequest { - // Required. The ID of the Google Cloud Platform project that the cluster - // belongs to. 
- string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The cluster name. - string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Specifying the `cluster_uuid` means the RPC should fail - // (with error NOT_FOUND) if cluster with specified UUID does not exist. - string cluster_uuid = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A unique ID used to identify the request. If the server - // receives two - // [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s - // with the same id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created - // and stored in the backend is returned. - // - // It is recommended to always set this value to a - // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - // - // The ID must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). The maximum length is 40 characters. - string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; -} - -// Request to get the resource representation for a cluster in a project. -message GetClusterRequest { - // Required. The ID of the Google Cloud Platform project that the cluster - // belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The cluster name. - string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// A request to list the clusters in a project. -message ListClustersRequest { - // Required. The ID of the Google Cloud Platform project that the cluster - // belongs to. 
- string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 4 [(google.api.field_behavior) = REQUIRED]; - - // Optional. A filter constraining the clusters to list. Filters are - // case-sensitive and have the following syntax: - // - // field = value [AND [field = value]] ... - // - // where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, - // and `[KEY]` is a label key. **value** can be `*` to match all values. - // `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, - // `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` - // contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` - // contains the `DELETING` and `ERROR` states. - // `clusterName` is the name of the cluster provided at creation time. - // Only the logical `AND` operator is supported; space-separated items are - // treated as having an implicit `AND` operator. - // - // Example filter: - // - // status.state = ACTIVE AND clusterName = mycluster - // AND labels.env = staging AND labels.starred = * - string filter = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The standard List page size. - int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The standard List page token. - string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// The list of all clusters in a project. -message ListClustersResponse { - // Output only. The clusters in the project. - repeated Cluster clusters = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. This token is included in the response if there are more - // results to fetch. To fetch additional results, provide this value as the - // `page_token` in a subsequent `ListClustersRequest`. - string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// A request to collect cluster diagnostic information. 
-message DiagnoseClusterRequest { - // Required. The ID of the Google Cloud Platform project that the cluster - // belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The cluster name. - string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The location of diagnostic output. -message DiagnoseClusterResults { - // Output only. The Cloud Storage URI of the diagnostic output. - // The output report is a plain text file with a summary of collected - // diagnostics. - string output_uri = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Reservation Affinity for consuming Zonal reservation. -message ReservationAffinity { - // Indicates whether to consume capacity from an reservation or not. - enum Type { - TYPE_UNSPECIFIED = 0; - - // Do not consume from any allocated capacity. - NO_RESERVATION = 1; - - // Consume any reservation available. - ANY_RESERVATION = 2; - - // Must consume from a specific reservation. Must specify key value fields - // for specifying the reservations. - SPECIFIC_RESERVATION = 3; - } - - // Optional. Type of reservation to consume - Type consume_reservation_type = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Corresponds to the label key of reservation resource. - string key = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Corresponds to the label values of reservation resource. 
- repeated string values = 3 [(google.api.field_behavior) = OPTIONAL]; -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/jobs.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/jobs.proto deleted file mode 100644 index e9dcf9cc6b8..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/jobs.proto +++ /dev/null @@ -1,951 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.dataproc.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; -option java_multiple_files = true; -option java_outer_classname = "JobsProto"; -option java_package = "com.google.cloud.dataproc.v1"; - -// The JobController provides methods to manage jobs. -service JobController { - option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; - - // Submits a job to a cluster. 
- rpc SubmitJob(SubmitJobRequest) returns (Job) { - option (google.api.http) = { - post: "/v1/projects/{project_id}/regions/{region}/jobs:submit" - body: "*" - }; - option (google.api.method_signature) = "project_id,region,job"; - } - - // Submits job to a cluster. - rpc SubmitJobAsOperation(SubmitJobRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/projects/{project_id}/regions/{region}/jobs:submitAsOperation" - body: "*" - }; - option (google.api.method_signature) = "project_id, region, job"; - option (google.longrunning.operation_info) = { - response_type: "Job" - metadata_type: "JobMetadata" - }; - } - - // Gets the resource representation for a job in a project. - rpc GetJob(GetJobRequest) returns (Job) { - option (google.api.http) = { - get: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}" - }; - option (google.api.method_signature) = "project_id,region,job_id"; - } - - // Lists regions/{region}/jobs in a project. - rpc ListJobs(ListJobsRequest) returns (ListJobsResponse) { - option (google.api.http) = { - get: "/v1/projects/{project_id}/regions/{region}/jobs" - }; - option (google.api.method_signature) = "project_id,region"; - option (google.api.method_signature) = "project_id,region,filter"; - } - - // Updates a job in a project. - rpc UpdateJob(UpdateJobRequest) returns (Job) { - option (google.api.http) = { - patch: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}" - body: "job" - }; - } - - // Starts a job cancellation request. To access the job resource - // after cancellation, call - // [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) - // or - // [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). 
- rpc CancelJob(CancelJobRequest) returns (Job) { - option (google.api.http) = { - post: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel" - body: "*" - }; - option (google.api.method_signature) = "project_id,region,job_id"; - } - - // Deletes the job from the project. If the job is active, the delete fails, - // and the response returns `FAILED_PRECONDITION`. - rpc DeleteJob(DeleteJobRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}" - }; - option (google.api.method_signature) = "project_id,region,job_id"; - } -} - -// The runtime logging config of the job. -message LoggingConfig { - // The Log4j level for job execution. When running an - // [Apache Hive](https://hive.apache.org/) job, Cloud - // Dataproc configures the Hive client to an equivalent verbosity level. - enum Level { - // Level is unspecified. Use default level for log4j. - LEVEL_UNSPECIFIED = 0; - - // Use ALL level for log4j. - ALL = 1; - - // Use TRACE level for log4j. - TRACE = 2; - - // Use DEBUG level for log4j. - DEBUG = 3; - - // Use INFO level for log4j. - INFO = 4; - - // Use WARN level for log4j. - WARN = 5; - - // Use ERROR level for log4j. - ERROR = 6; - - // Use FATAL level for log4j. - FATAL = 7; - - // Turn off log4j. - OFF = 8; - } - - // The per-package log levels for the driver. This may include - // "root" package name to configure rootLogger. - // Examples: - // 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' - map driver_log_levels = 2; -} - -// A Dataproc job for running -// [Apache Hadoop -// MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) -// jobs on [Apache Hadoop -// YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). -message HadoopJob { - // Required. Indicates the location of the driver's main class. 
Specify - // either the jar file that contains the main class or the main class name. - // To specify both, add the jar file to `jar_file_uris`, and then specify - // the main class name in this property. - oneof driver { - // The HCFS URI of the jar file containing the main class. - // Examples: - // 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' - // 'hdfs:/tmp/test-samples/custom-wordcount.jar' - // 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' - string main_jar_file_uri = 1; - - // The name of the driver's main class. The jar file containing the class - // must be in the default CLASSPATH or specified in `jar_file_uris`. - string main_class = 2; - } - - // Optional. The arguments to pass to the driver. Do not - // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as - // job properties, since a collision may occur that causes an incorrect job - // submission. - repeated string args = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Jar file URIs to add to the CLASSPATHs of the - // Hadoop driver and tasks. - repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied - // to the working directory of Hadoop drivers and distributed tasks. Useful - // for naively parallel tasks. - repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of archives to be extracted in the working directory of - // Hadoop drivers and tasks. Supported file types: - // .jar, .tar, .tar.gz, .tgz, or .zip. - repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A mapping of property names to values, used to configure Hadoop. - // Properties that conflict with values set by the Dataproc API may be - // overwritten. Can include properties set in /etc/hadoop/conf/*-site and - // classes in user code. 
- map properties = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL]; -} - -// A Dataproc job for running [Apache Spark](https://spark.apache.org/) -// applications on YARN. -message SparkJob { - // Required. The specification of the main method to call to drive the job. - // Specify either the jar file that contains the main class or the main class - // name. To pass both a main jar and a main class in that jar, add the jar to - // `CommonJob.jar_file_uris`, and then specify the main class name in - // `main_class`. - oneof driver { - // The HCFS URI of the jar file that contains the main class. - string main_jar_file_uri = 1; - - // The name of the driver's main class. The jar file that contains the class - // must be in the default CLASSPATH or specified in `jar_file_uris`. - string main_class = 2; - } - - // Optional. The arguments to pass to the driver. Do not include arguments, - // such as `--conf`, that can be set as job properties, since a collision may - // occur that causes an incorrect job submission. - repeated string args = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the - // Spark driver and tasks. - repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of files to be placed in the working directory of - // each executor. Useful for naively parallel tasks. - repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of archives to be extracted into the working directory - // of each executor. Supported file types: - // .jar, .tar, .tar.gz, .tgz, and .zip. - repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A mapping of property names to values, used to configure Spark. 
- // Properties that conflict with values set by the Dataproc API may be - // overwritten. Can include properties set in - // /etc/spark/conf/spark-defaults.conf and classes in user code. - map properties = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL]; -} - -// A Dataproc job for running -// [Apache -// PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) -// applications on YARN. -message PySparkJob { - // Required. The HCFS URI of the main Python file to use as the driver. Must - // be a .py file. - string main_python_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The arguments to pass to the driver. Do not include arguments, - // such as `--conf`, that can be set as job properties, since a collision may - // occur that causes an incorrect job submission. - repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS file URIs of Python files to pass to the PySpark - // framework. Supported file types: .py, .egg, and .zip. - repeated string python_file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the - // Python driver and tasks. - repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of files to be placed in the working directory of - // each executor. Useful for naively parallel tasks. - repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of archives to be extracted into the working directory - // of each executor. Supported file types: - // .jar, .tar, .tar.gz, .tgz, and .zip. - repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A mapping of property names to values, used to configure PySpark. 
- // Properties that conflict with values set by the Dataproc API may be - // overwritten. Can include properties set in - // /etc/spark/conf/spark-defaults.conf and classes in user code. - map properties = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL]; -} - -// A list of queries to run on a cluster. -message QueryList { - // Required. The queries to execute. You do not need to end a query expression - // with a semicolon. Multiple queries can be specified in one - // string by separating each with a semicolon. Here is an example of a - // Dataproc API snippet that uses a QueryList to specify a HiveJob: - // - // "hiveJob": { - // "queryList": { - // "queries": [ - // "query1", - // "query2", - // "query3;query4", - // ] - // } - // } - repeated string queries = 1 [(google.api.field_behavior) = REQUIRED]; -} - -// A Dataproc job for running [Apache Hive](https://hive.apache.org/) -// queries on YARN. -message HiveJob { - // Required. The sequence of Hive queries to execute, specified as either - // an HCFS file URI or a list of queries. - oneof queries { - // The HCFS URI of the script that contains Hive queries. - string query_file_uri = 1; - - // A list of queries. - QueryList query_list = 2; - } - - // Optional. Whether to continue executing queries if a query fails. - // The default value is `false`. Setting to `true` can be useful when - // executing independent parallel queries. - bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Mapping of query variable names to values (equivalent to the - // Hive command: `SET name="value";`). - map script_variables = 4 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A mapping of property names and values, used to configure Hive. - // Properties that conflict with values set by the Dataproc API may be - // overwritten. 
Can include properties set in /etc/hadoop/conf/*-site.xml, - // /etc/hive/conf/hive-site.xml, and classes in user code. - map properties = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of jar files to add to the CLASSPATH of the - // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes - // and UDFs. - repeated string jar_file_uris = 6 [(google.api.field_behavior) = OPTIONAL]; -} - -// A Dataproc job for running [Apache Spark -// SQL](https://spark.apache.org/sql/) queries. -message SparkSqlJob { - // Required. The sequence of Spark SQL queries to execute, specified as - // either an HCFS file URI or as a list of queries. - oneof queries { - // The HCFS URI of the script that contains SQL queries. - string query_file_uri = 1; - - // A list of queries. - QueryList query_list = 2; - } - - // Optional. Mapping of query variable names to values (equivalent to the - // Spark SQL command: SET `name="value";`). - map script_variables = 3 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A mapping of property names to values, used to configure - // Spark SQL's SparkConf. Properties that conflict with values set by the - // Dataproc API may be overwritten. - map properties = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. - repeated string jar_file_uris = 56 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL]; -} - -// A Dataproc job for running [Apache Pig](https://pig.apache.org/) -// queries on YARN. -message PigJob { - // Required. The sequence of Pig queries to execute, specified as an HCFS - // file URI or a list of queries. - oneof queries { - // The HCFS URI of the script that contains the Pig queries. - string query_file_uri = 1; - - // A list of queries. - QueryList query_list = 2; - } - - // Optional. 
Whether to continue executing queries if a query fails. - // The default value is `false`. Setting to `true` can be useful when - // executing independent parallel queries. - bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Mapping of query variable names to values (equivalent to the Pig - // command: `name=[value]`). - map script_variables = 4 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A mapping of property names to values, used to configure Pig. - // Properties that conflict with values set by the Dataproc API may be - // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, - // /etc/pig/conf/pig.properties, and classes in user code. - map properties = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of jar files to add to the CLASSPATH of - // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. - repeated string jar_file_uris = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL]; -} - -// A Dataproc job for running -// [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html) -// applications on YARN. -message SparkRJob { - // Required. The HCFS URI of the main R file to use as the driver. - // Must be a .R file. - string main_r_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The arguments to pass to the driver. Do not include arguments, - // such as `--conf`, that can be set as job properties, since a collision may - // occur that causes an incorrect job submission. - repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. HCFS URIs of files to be placed in the working directory of - // each executor. Useful for naively parallel tasks. - repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. 
HCFS URIs of archives to be extracted into the working directory - // of each executor. Supported file types: - // .jar, .tar, .tar.gz, .tgz, and .zip. - repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A mapping of property names to values, used to configure SparkR. - // Properties that conflict with values set by the Dataproc API may be - // overwritten. Can include properties set in - // /etc/spark/conf/spark-defaults.conf and classes in user code. - map properties = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL]; -} - -// A Dataproc job for running [Presto](https://prestosql.io/) queries. -// **IMPORTANT**: The [Dataproc Presto Optional -// Component](https://cloud.google.com/dataproc/docs/concepts/components/presto) -// must be enabled when the cluster is created to submit a Presto job to the -// cluster. -message PrestoJob { - // Required. The sequence of Presto queries to execute, specified as - // either an HCFS file URI or as a list of queries. - oneof queries { - // The HCFS URI of the script that contains SQL queries. - string query_file_uri = 1; - - // A list of queries. - QueryList query_list = 2; - } - - // Optional. Whether to continue executing queries if a query fails. - // The default value is `false`. Setting to `true` can be useful when - // executing independent parallel queries. - bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The format in which query output will be displayed. See the - // Presto documentation for supported output formats - string output_format = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Presto client tags to attach to this query - repeated string client_tags = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A mapping of property names to values. 
Used to set Presto - // [session properties](https://prestodb.io/docs/current/sql/set-session.html) - // Equivalent to using the --session flag in the Presto CLI - map properties = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL]; -} - -// Dataproc job config. -message JobPlacement { - // Required. The name of the cluster where the job will be submitted. - string cluster_name = 1 [(google.api.field_behavior) = REQUIRED]; - - // Output only. A cluster UUID generated by the Dataproc service when - // the job is submitted. - string cluster_uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. Cluster labels to identify a cluster where the job will be - // submitted. - map cluster_labels = 3 - [(google.api.field_behavior) = OPTIONAL]; -} - -// Dataproc job status. -message JobStatus { - // The job state. - enum State { - // The job state is unknown. - STATE_UNSPECIFIED = 0; - - // The job is pending; it has been submitted, but is not yet running. - PENDING = 1; - - // Job has been received by the service and completed initial setup; - // it will soon be submitted to the cluster. - SETUP_DONE = 8; - - // The job is running on the cluster. - RUNNING = 2; - - // A CancelJob request has been received, but is pending. - CANCEL_PENDING = 3; - - // Transient in-flight resources have been canceled, and the request to - // cancel the running job has been issued to the cluster. - CANCEL_STARTED = 7; - - // The job cancellation was successful. - CANCELLED = 4; - - // The job has completed successfully. - DONE = 5; - - // The job has completed, but encountered an error. - ERROR = 6; - - // Job attempt has failed. The detail field contains failure details for - // this attempt. - // - // Applies to restartable jobs only. - ATTEMPT_FAILURE = 9; - } - - // The job substate. - enum Substate { - // The job substate is unknown. 
- UNSPECIFIED = 0; - - // The Job is submitted to the agent. - // - // Applies to RUNNING state. - SUBMITTED = 1; - - // The Job has been received and is awaiting execution (it may be waiting - // for a condition to be met). See the "details" field for the reason for - // the delay. - // - // Applies to RUNNING state. - QUEUED = 2; - - // The agent-reported status is out of date, which may be caused by a - // loss of communication between the agent and Dataproc. If the - // agent does not send a timely update, the job will fail. - // - // Applies to RUNNING state. - STALE_STATUS = 3; - } - - // Output only. A state message specifying the overall job state. - State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. Output only. Job state details, such as an error - // description if the state is ERROR. - string details = 2 [ - (google.api.field_behavior) = OUTPUT_ONLY, - (google.api.field_behavior) = OPTIONAL - ]; - - // Output only. The time when this state was entered. - google.protobuf.Timestamp state_start_time = 6 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Additional state information, which includes - // status reported by the agent. - Substate substate = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Encapsulates the full scoping used to reference a job. -message JobReference { - // Optional. The ID of the Google Cloud Platform project that the job belongs - // to. If specified, must match the request project ID. - string project_id = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The job ID, which must be unique within the project. - // - // The ID must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), or hyphens (-). The maximum length is 100 characters. - // - // If not specified by the caller, the job ID will be provided by the server. - string job_id = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// A YARN application created by a job. 
Application information is a subset of -// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. -// -// **Beta Feature**: This report is available for testing purposes only. It may -// be changed before final release. -message YarnApplication { - // The application state, corresponding to - // YarnProtos.YarnApplicationStateProto. - enum State { - // Status is unspecified. - STATE_UNSPECIFIED = 0; - - // Status is NEW. - NEW = 1; - - // Status is NEW_SAVING. - NEW_SAVING = 2; - - // Status is SUBMITTED. - SUBMITTED = 3; - - // Status is ACCEPTED. - ACCEPTED = 4; - - // Status is RUNNING. - RUNNING = 5; - - // Status is FINISHED. - FINISHED = 6; - - // Status is FAILED. - FAILED = 7; - - // Status is KILLED. - KILLED = 8; - } - - // Required. The application name. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The application state. - State state = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The numerical progress of the application, from 1 to 100. - float progress = 3 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or - // TimelineServer that provides application-specific information. The URL uses - // the internal hostname, and requires a proxy server for resolution and, - // possibly, access. - string tracking_url = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// A Dataproc job resource. -message Job { - // Optional. The fully qualified reference to the job, which can be used to - // obtain the equivalent REST path of the job resource. If this property - // is not specified when a job is created, the server generates a - // job_id. - JobReference reference = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Required. Job information, including how, when, and where to - // run the job. - JobPlacement placement = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The application/framework-specific portion of the job. 
- oneof type_job { - // Optional. Job is a Hadoop job. - HadoopJob hadoop_job = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a Spark job. - SparkJob spark_job = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a PySpark job. - PySparkJob pyspark_job = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a Hive job. - HiveJob hive_job = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a Pig job. - PigJob pig_job = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a SparkR job. - SparkRJob spark_r_job = 21 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a SparkSql job. - SparkSqlJob spark_sql_job = 12 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a Presto job. - PrestoJob presto_job = 23 [(google.api.field_behavior) = OPTIONAL]; - } - - // Output only. The job status. Additional application-specific - // status information may be contained in the type_job - // and yarn_applications fields. - JobStatus status = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The previous job status. - repeated JobStatus status_history = 13 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The collection of YARN applications spun up by this job. - // - // **Beta** Feature: This report is available for testing purposes only. It - // may be changed before final release. - repeated YarnApplication yarn_applications = 9 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A URI pointing to the location of the stdout of the job's - // driver program. - string driver_output_resource_uri = 17 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. If present, the location of miscellaneous control files - // which may be used as part of job setup and handling. If not present, - // control files may be placed in the same location as `driver_output_uri`. 
- string driver_control_files_uri = 15 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. The labels to associate with this job. - // Label **keys** must contain 1 to 63 characters, and must conform to - // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - // Label **values** may be empty, but, if present, must contain 1 to 63 - // characters, and must conform to [RFC - // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be - // associated with a job. - map labels = 18 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job scheduling configuration. - JobScheduling scheduling = 20 [(google.api.field_behavior) = OPTIONAL]; - - // Output only. A UUID that uniquely identifies a job within the project - // over time. This is in contrast to a user-settable reference.job_id that - // may be reused over time. - string job_uuid = 22 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Indicates whether the job is completed. If the value is - // `false`, the job is still in progress. If `true`, the job is completed, and - // `status.state` field will indicate if it was successful, failed, - // or cancelled. - bool done = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. Driver scheduling configuration. - DriverSchedulingConfig driver_scheduling_config = 27 - [(google.api.field_behavior) = OPTIONAL]; -} - -// Driver scheduling configuration. -message DriverSchedulingConfig { - // Required. The amount of memory in MB the driver is requesting. - int32 memory_mb = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The number of vCPUs the driver is requesting. - int32 vcores = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Job scheduling options. -message JobScheduling { - // Optional. Maximum number of times per hour a driver may be restarted as - // a result of driver exiting with non-zero code before job is - // reported failed. 
- // - // A job may be reported as thrashing if the driver exits with a non-zero code - // four times within a 10-minute window. - // - // Maximum value is 10. - // - // **Note:** This restartable job option is not supported in Dataproc - // [workflow templates] - // (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). - int32 max_failures_per_hour = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Maximum total number of times a driver may be restarted as a - // result of the driver exiting with a non-zero code. After the maximum number - // is reached, the job will be reported as failed. - // - // Maximum value is 240. - // - // **Note:** Currently, this restartable job option is - // not supported in Dataproc - // [workflow - // templates](https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). - int32 max_failures_total = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to submit a job. -message SubmitJobRequest { - // Required. The ID of the Google Cloud Platform project that the job - // belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The job resource. - Job job = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. A unique id used to identify the request. If the server - // receives two - // [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s - // with the same id, then the second request will be ignored and the - // first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend - // is returned. - // - // It is recommended to always set this value to a - // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). 
- // - // The id must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). The maximum length is 40 characters. - string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// Job Operation metadata. -message JobMetadata { - // Output only. The job id. - string job_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Most recent job status. - JobStatus status = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Operation type. - string operation_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Job submission time. - google.protobuf.Timestamp start_time = 4 - [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// A request to get the resource representation for a job in a project. -message GetJobRequest { - // Required. The ID of the Google Cloud Platform project that the job - // belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The job ID. - string job_id = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// A request to list jobs in a project. -message ListJobsRequest { - // A matcher that specifies categories of job states. - enum JobStateMatcher { - // Match all jobs, regardless of state. - ALL = 0; - - // Only match jobs in non-terminal states: PENDING, RUNNING, or - // CANCEL_PENDING. - ACTIVE = 1; - - // Only match jobs in terminal states: CANCELLED, DONE, or ERROR. - NON_ACTIVE = 2; - } - - // Required. The ID of the Google Cloud Platform project that the job - // belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 6 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The number of results to return in each response. 
- int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The page token, returned by a previous call, to request the - // next page of results. - string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If set, the returned jobs list includes only jobs that were - // submitted to the named cluster. - string cluster_name = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Specifies enumerated categories of jobs to list. - // (default = match ALL jobs). - // - // If `filter` is provided, `jobStateMatcher` will be ignored. - JobStateMatcher job_state_matcher = 5 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A filter constraining the jobs to list. Filters are - // case-sensitive and have the following syntax: - // - // [field = value] AND [field [= value]] ... - // - // where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label - // key. **value** can be `*` to match all values. - // `status.state` can be either `ACTIVE` or `NON_ACTIVE`. - // Only the logical `AND` operator is supported; space-separated items are - // treated as having an implicit `AND` operator. - // - // Example filter: - // - // status.state = ACTIVE AND labels.env = staging AND labels.starred = * - string filter = 7 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to update a job. -message UpdateJobRequest { - // Required. The ID of the Google Cloud Platform project that the job - // belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The job ID. - string job_id = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The changes to the job. - Job job = 4 [(google.api.field_behavior) = REQUIRED]; - - // Required. Specifies the path, relative to Job, of - // the field to update. 
For example, to update the labels of a Job the - // update_mask parameter would be specified as - // labels, and the `PATCH` request body would specify the new - // value. Note: Currently, labels is the only - // field that can be updated. - google.protobuf.FieldMask update_mask = 5 - [(google.api.field_behavior) = REQUIRED]; -} - -// A list of jobs in a project. -message ListJobsResponse { - // Output only. Jobs list. - repeated Job jobs = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. This token is included in the response if there are more results - // to fetch. To fetch additional results, provide this value as the - // `page_token` in a subsequent ListJobsRequest. - string next_page_token = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to cancel a job. -message CancelJobRequest { - // Required. The ID of the Google Cloud Platform project that the job - // belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The job ID. - string job_id = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// A request to delete a job. -message DeleteJobRequest { - // Required. The ID of the Google Cloud Platform project that the job - // belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Dataproc region in which to handle the request. - string region = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The job ID. 
- string job_id = 2 [(google.api.field_behavior) = REQUIRED]; -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/operations.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/operations.proto deleted file mode 100644 index 854d9eda6ab..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/operations.proto +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.dataproc.v1; - -import "google/api/field_behavior.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; -option java_multiple_files = true; -option java_outer_classname = "OperationsProto"; -option java_package = "com.google.cloud.dataproc.v1"; - -// Metadata describing the Batch operation. -message BatchOperationMetadata { - // Operation type for Batch resources - enum BatchOperationType { - // Batch operation type is unknown. - BATCH_OPERATION_TYPE_UNSPECIFIED = 0; - - // Batch operation type. - BATCH = 1; - } - - // Name of the batch for the operation. - string batch = 1; - - // Batch UUID for the operation. - string batch_uuid = 2; - - // The time when the operation was created. - google.protobuf.Timestamp create_time = 3; - - // The time when the operation finished. 
- google.protobuf.Timestamp done_time = 4; - - // The operation type. - BatchOperationType operation_type = 6; - - // Short description of the operation. - string description = 7; - - // Labels associated with the operation. - map labels = 8; - - // Warnings encountered during operation execution. - repeated string warnings = 9; -} - -// The status of the operation. -message ClusterOperationStatus { - // The operation state. - enum State { - // Unused. - UNKNOWN = 0; - - // The operation has been created. - PENDING = 1; - - // The operation is running. - RUNNING = 2; - - // The operation is done; either cancelled or completed. - DONE = 3; - } - - // Output only. A message containing the operation state. - State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A message containing the detailed operation state. - string inner_state = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A message containing any operation metadata details. - string details = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The time this state was entered. - google.protobuf.Timestamp state_start_time = 4 - [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Metadata describing the operation. -message ClusterOperationMetadata { - // Output only. Name of the cluster for the operation. - string cluster_name = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Cluster UUID for the operation. - string cluster_uuid = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Current operation status. - ClusterOperationStatus status = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The previous operation status. - repeated ClusterOperationStatus status_history = 10 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The operation type. - string operation_type = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Short description of operation. 
- string description = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Labels associated with the operation - map labels = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Errors encountered during operation execution. - repeated string warnings = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Metadata describing the node group operation. -message NodeGroupOperationMetadata { - // Operation type for node group resources. - enum NodeGroupOperationType { - // Node group operation type is unknown. - NODE_GROUP_OPERATION_TYPE_UNSPECIFIED = 0; - - // Create node group operation type. - CREATE = 1; - - // Update node group operation type. - UPDATE = 2; - - // Delete node group operation type. - DELETE = 3; - - // Resize node group operation type. - RESIZE = 4; - } - - // Output only. Node group ID for the operation. - string node_group_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Cluster UUID associated with the node group operation. - string cluster_uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Current operation status. - ClusterOperationStatus status = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The previous operation status. - repeated ClusterOperationStatus status_history = 4 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // The operation type. - NodeGroupOperationType operation_type = 5; - - // Output only. Short description of operation. - string description = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Labels associated with the operation. - map labels = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Errors encountered during operation execution. 
- repeated string warnings = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/shared.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/shared.proto deleted file mode 100644 index 18796915775..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/shared.proto +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.dataproc.v1; - -import "google/api/field_behavior.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; -option java_multiple_files = true; -option java_outer_classname = "SharedProto"; -option java_package = "com.google.cloud.dataproc.v1"; - -// Runtime configuration for a workload. -message RuntimeConfig { - // Optional. Version of the batch runtime. - string version = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Optional custom container image for the job runtime environment. If - // not specified, a default container image will be used. - string container_image = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A mapping of property names to values, which are used to configure workload - // execution. - map properties = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// Environment configuration for a workload. 
-message EnvironmentConfig { - // Optional. Execution configuration for a workload. - ExecutionConfig execution_config = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Peripherals configuration that workload has access to. - PeripheralsConfig peripherals_config = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// Execution configuration for a workload. -message ExecutionConfig { - // Optional. Service account that used to execute workload. - string service_account = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Network configuration for workload execution. - oneof network { - // Optional. Network URI to connect workload to. - string network_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Subnetwork URI to connect workload to. - string subnetwork_uri = 5 [(google.api.field_behavior) = OPTIONAL]; - } - - // Optional. Tags used for network traffic control. - repeated string network_tags = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The Cloud KMS key to use for encryption. - string kms_key = 7 [(google.api.field_behavior) = OPTIONAL]; -} - -// Spark History Server configuration for the workload. -message SparkHistoryServerConfig { - // Optional. Resource name of an existing Dataproc Cluster to act as a Spark History - // Server for the workload. - // - // Example: - // - // * `projects/[project_id]/regions/[region]/clusters/[cluster_name]` - string dataproc_cluster = 1 [ - (google.api.field_behavior) = OPTIONAL - ]; -} - -// Auxiliary services configuration for a workload. -message PeripheralsConfig { - // Optional. Resource name of an existing Dataproc Metastore service. - // - // Example: - // - // * `projects/[project_id]/locations/[region]/services/[service_id]` - string metastore_service = 1 [ - (google.api.field_behavior) = OPTIONAL - ]; - - // Optional. The Spark History Server configuration for the workload. 
- SparkHistoryServerConfig spark_history_server_config = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// Runtime information about workload execution. -message RuntimeInfo { - // Output only. Map of remote access endpoints (such as web interfaces and APIs) to their - // URIs. - map endpoints = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A URI pointing to the location of the stdout and stderr of the workload. - string output_uri = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A URI pointing to the location of the diagnostics tarball. - string diagnostic_output_uri = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// The cluster's GKE config. -message GkeClusterConfig { - // Optional. A target GKE cluster to deploy to. It must be in the same project and - // region as the Dataproc cluster (the GKE cluster can be zonal or regional). - // Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' - string gke_cluster_target = 2 [ - (google.api.field_behavior) = OPTIONAL - ]; - - // Optional. GKE NodePools where workloads will be scheduled. At least one node pool - // must be assigned the 'default' role. Each role can be given to only a - // single NodePoolTarget. All NodePools must have the same location settings. - // If a nodePoolTarget is not specified, Dataproc constructs a default - // nodePoolTarget. - repeated GkeNodePoolTarget node_pool_target = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// The configuration for running the Dataproc cluster on Kubernetes. -message KubernetesClusterConfig { - // Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace - // does not exist, it is created. If it exists, Dataproc - // verifies that another Dataproc VirtualCluster is not installed - // into it. If not specified, the name of the Dataproc Cluster is used. - string kubernetes_namespace = 1 [(google.api.field_behavior) = OPTIONAL]; - - oneof config { - // Required. 
The configuration for running the Dataproc cluster on GKE. - GkeClusterConfig gke_cluster_config = 2 [(google.api.field_behavior) = REQUIRED]; - } - - // Optional. The software configuration for this Dataproc cluster running on Kubernetes. - KubernetesSoftwareConfig kubernetes_software_config = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// The software configuration for this Dataproc cluster running on Kubernetes. -message KubernetesSoftwareConfig { - // The components that should be installed in this Dataproc cluster. The key - // must be a string from the KubernetesComponent enumeration. The value is - // the version of the software to be installed. - // At least one entry must be specified. - map component_version = 1; - - // The properties to set on daemon config files. - // - // Property keys are specified in `prefix:property` format, for example - // `spark:spark.kubernetes.container.image`. The following are supported - // prefixes and their mappings: - // - // * spark: `spark-defaults.conf` - // - // For more information, see [Cluster - // properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties). - map properties = 2; -} - -// GKE NodePools that Dataproc workloads run on. -message GkeNodePoolTarget { - // `Role` specifies whose tasks will run on the NodePool. The roles can be - // specific to workloads. Exactly one GkeNodePoolTarget within the - // VirtualCluster must have 'default' role, which is used to run all workloads - // that are not associated with a NodePool. - enum Role { - // Role is unspecified. - ROLE_UNSPECIFIED = 0; - - // Any roles that are not directly assigned to a NodePool run on the - // `default` role's NodePool. - DEFAULT = 1; - - // Run controllers and webhooks. - CONTROLLER = 2; - - // Run spark driver. - SPARK_DRIVER = 3; - - // Run spark executors. - SPARK_EXECUTOR = 4; - } - - // Required. The target GKE NodePool. 
- // Format: - // 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}' - string node_pool = 1 [ - (google.api.field_behavior) = REQUIRED - ]; - - // Required. The types of role for a GKE NodePool - repeated Role roles = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The configuration for the GKE NodePool. - // - // If specified, Dataproc attempts to create a NodePool with the - // specified shape. If one with the same name already exists, it is - // verified against all specified fields. If a field differs, the - // virtual cluster creation will fail. - // - // If omitted, any NodePool with the specified name is used. If a - // NodePool with the specified name does not exist, Dataproc create a NodePool - // with default values. - GkeNodePoolConfig node_pool_config = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// The configuration of a GKE NodePool used by a [Dataproc-on-GKE -// cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster). -message GkeNodePoolConfig { - // Parameters that describe cluster nodes. - message GkeNodeConfig { - // Optional. The name of a Compute Engine [machine - // type](https://cloud.google.com/compute/docs/machine-types). - string machine_type = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Whether the nodes are created as [preemptible VM - // instances](https://cloud.google.com/compute/docs/instances/preemptible). - bool preemptible = 10 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The number of local SSD disks to attach to the node, which is limited by - // the maximum number of disks allowable per zone (see [Adding Local - // SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)). - int32 local_ssd_count = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A list of [hardware - // accelerators](https://cloud.google.com/compute/docs/gpus) to attach to - // each node. 
- repeated GkeNodePoolAcceleratorConfig accelerators = 11 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. [Minimum CPU - // platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) - // to be used by this instance. The instance may be scheduled on the - // specified or a newer CPU platform. Specify the friendly names of CPU - // platforms, such as "Intel Haswell"` or Intel Sandy Bridge". - string min_cpu_platform = 13 [(google.api.field_behavior) = OPTIONAL]; - } - - // A GkeNodeConfigAcceleratorConfig represents a Hardware Accelerator request - // for a NodePool. - message GkeNodePoolAcceleratorConfig { - // The number of accelerator cards exposed to an instance. - int64 accelerator_count = 1; - - // The accelerator type resource namename (see GPUs on Compute Engine). - string accelerator_type = 2; - } - - // GkeNodePoolAutoscaling contains information the cluster autoscaler needs to - // adjust the size of the node pool to the current cluster usage. - message GkeNodePoolAutoscalingConfig { - // The minimum number of nodes in the NodePool. Must be >= 0 and <= - // max_node_count. - int32 min_node_count = 2; - - // The maximum number of nodes in the NodePool. Must be >= min_node_count. - // **Note:** Quota must be sufficient to scale up the cluster. - int32 max_node_count = 3; - } - - // Optional. The node pool configuration. - GkeNodeConfig config = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The list of Compute Engine - // [zones](https://cloud.google.com/compute/docs/zones#available) where - // NodePool's nodes will be located. - // - // **Note:** Currently, only one zone may be specified. - // - // If a location is not specified during NodePool creation, Dataproc will - // choose a location. - repeated string locations = 13 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled - // only when a valid configuration is present. 
- GkeNodePoolAutoscalingConfig autoscaling = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// Cluster components that can be activated. -enum Component { - // Unspecified component. Specifying this will cause Cluster creation to fail. - COMPONENT_UNSPECIFIED = 0; - - // The Anaconda python distribution. The Anaconda component is not supported - // in the Dataproc - // 2.0 - // image. The 2.0 image is pre-installed with Miniconda. - ANACONDA = 5; - - // Docker - DOCKER = 13; - - // The Druid query engine. (alpha) - DRUID = 9; - - // Flink - FLINK = 14; - - // HBase. (beta) - HBASE = 11; - - // The Hive Web HCatalog (the REST service for accessing HCatalog). - HIVE_WEBHCAT = 3; - - // The Jupyter Notebook. - JUPYTER = 1; - - // The Presto query engine. - PRESTO = 6; - - // The Ranger service. - RANGER = 12; - - // The Solr service. - SOLR = 10; - - // The Zeppelin notebook. - ZEPPELIN = 4; - - // The Zookeeper service. - ZOOKEEPER = 8; -} - -// Actions in response to failure of a resource associated with a cluster. -enum FailureAction { - // When FailureAction is unspecified, failure action defaults to NO_ACTION. - FAILURE_ACTION_UNSPECIFIED = 0; - - // Take no action on failure to create a cluster resource. NO_ACTION is the - // default. - NO_ACTION = 1; - - // Delete the failed cluster resource. - DELETE = 2; -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/workflow_templates.proto b/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/workflow_templates.proto deleted file mode 100644 index 416ba26d03c..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/workflow_templates.proto +++ /dev/null @@ -1,807 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.dataproc.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/cloud/dataproc/v1/clusters.proto"; -import "google/cloud/dataproc/v1/jobs.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"; -option java_multiple_files = true; -option java_outer_classname = "WorkflowTemplatesProto"; -option java_package = "com.google.cloud.dataproc.v1"; - -// The API interface for managing Workflow Templates in the -// Dataproc API. -service WorkflowTemplateService { - option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; - - // Creates new workflow template. - rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) returns (WorkflowTemplate) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/locations/*}/workflowTemplates" - body: "template" - additional_bindings { - post: "/v1/{parent=projects/*/regions/*}/workflowTemplates" - body: "template" - } - }; - option (google.api.method_signature) = "parent,template"; - } - - // Retrieves the latest workflow template. - // - // Can retrieve previously instantiated template by specifying optional - // version parameter. 
- rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) returns (WorkflowTemplate) { - option (google.api.http) = { - get: "/v1/{name=projects/*/locations/*/workflowTemplates/*}" - additional_bindings { - get: "/v1/{name=projects/*/regions/*/workflowTemplates/*}" - } - }; - option (google.api.method_signature) = "name"; - } - - // Instantiates a template and begins execution. - // - // The returned Operation can be used to track execution of - // workflow by polling - // [operations.get][google.longrunning.Operations.GetOperation]. - // The Operation will complete when entire workflow is finished. - // - // The running workflow can be aborted via - // [operations.cancel][google.longrunning.Operations.CancelOperation]. - // This will cause any inflight jobs to be cancelled and workflow-owned - // clusters to be deleted. - // - // The [Operation.metadata][google.longrunning.Operation.metadata] will be - // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). - // Also see [Using - // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). - // - // On successful completion, - // [Operation.response][google.longrunning.Operation.response] will be - // [Empty][google.protobuf.Empty]. - rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{name=projects/*/locations/*/workflowTemplates/*}:instantiate" - body: "*" - additional_bindings { - post: "/v1/{name=projects/*/regions/*/workflowTemplates/*}:instantiate" - body: "*" - } - }; - option (google.api.method_signature) = "name"; - option (google.api.method_signature) = "name,parameters"; - option (google.longrunning.operation_info) = { - response_type: "google.protobuf.Empty" - metadata_type: "WorkflowMetadata" - }; - } - - // Instantiates a template and begins execution. 
- // - // This method is equivalent to executing the sequence - // [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], - // [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. - // - // The returned Operation can be used to track execution of - // workflow by polling - // [operations.get][google.longrunning.Operations.GetOperation]. - // The Operation will complete when entire workflow is finished. - // - // The running workflow can be aborted via - // [operations.cancel][google.longrunning.Operations.CancelOperation]. - // This will cause any inflight jobs to be cancelled and workflow-owned - // clusters to be deleted. - // - // The [Operation.metadata][google.longrunning.Operation.metadata] will be - // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). - // Also see [Using - // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). - // - // On successful completion, - // [Operation.response][google.longrunning.Operation.response] will be - // [Empty][google.protobuf.Empty]. - rpc InstantiateInlineWorkflowTemplate(InstantiateInlineWorkflowTemplateRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline" - body: "template" - additional_bindings { - post: "/v1/{parent=projects/*/regions/*}/workflowTemplates:instantiateInline" - body: "template" - } - }; - option (google.api.method_signature) = "parent,template"; - option (google.longrunning.operation_info) = { - response_type: "google.protobuf.Empty" - metadata_type: "WorkflowMetadata" - }; - } - - // Updates (replaces) workflow template. 
The updated template - // must contain version that matches the current server version. - rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) returns (WorkflowTemplate) { - option (google.api.http) = { - put: "/v1/{template.name=projects/*/locations/*/workflowTemplates/*}" - body: "template" - additional_bindings { - put: "/v1/{template.name=projects/*/regions/*/workflowTemplates/*}" - body: "template" - } - }; - option (google.api.method_signature) = "template"; - } - - // Lists workflows that match the specified filter in the request. - rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) returns (ListWorkflowTemplatesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/locations/*}/workflowTemplates" - additional_bindings { - get: "/v1/{parent=projects/*/regions/*}/workflowTemplates" - } - }; - option (google.api.method_signature) = "parent"; - } - - // Deletes a workflow template. It does not cancel in-progress workflows. - rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/locations/*/workflowTemplates/*}" - additional_bindings { - delete: "/v1/{name=projects/*/regions/*/workflowTemplates/*}" - } - }; - option (google.api.method_signature) = "name"; - } -} - -// A Dataproc workflow template resource. -message WorkflowTemplate { - option (google.api.resource) = { - type: "dataproc.googleapis.com/WorkflowTemplate" - pattern: "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}" - pattern: "projects/{project}/locations/{location}/workflowTemplates/{workflow_template}" - history: ORIGINALLY_SINGLE_PATTERN - }; - - string id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Output only. The resource name of the workflow template, as described - // in https://cloud.google.com/apis/design/resource_names. 
- // - // * For `projects.regions.workflowTemplates`, the resource name of the - // template has the following format: - // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - // - // * For `projects.locations.workflowTemplates`, the resource name of the - // template has the following format: - // `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. Used to perform a consistent read-modify-write. - // - // This field should be left blank for a `CreateWorkflowTemplate` request. It - // is required for an `UpdateWorkflowTemplate` request, and must match the - // current server version. A typical update template flow would fetch the - // current template with a `GetWorkflowTemplate` request, which will return - // the current template with the `version` field filled in with the - // current server version. The user updates other fields in the template, - // then returns it as part of the `UpdateWorkflowTemplate` request. - int32 version = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Output only. The time template was created. - google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The time template was last updated. - google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. The labels to associate with this template. These labels - // will be propagated to all jobs and clusters created by the workflow - // instance. - // - // Label **keys** must contain 1 to 63 characters, and must conform to - // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - // - // Label **values** may be empty, but, if present, must contain 1 to 63 - // characters, and must conform to - // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - // - // No more than 32 labels can be associated with a template. 
- map labels = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Required. WorkflowTemplate scheduling information. - WorkflowTemplatePlacement placement = 7 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Directed Acyclic Graph of Jobs to submit. - repeated OrderedJob jobs = 8 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Template parameters whose values are substituted into the - // template. Values for parameters must be provided when the template is - // instantiated. - repeated TemplateParameter parameters = 9 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Timeout duration for the DAG of jobs, expressed in seconds (see - // [JSON representation of - // duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). - // The timeout duration must be from 10 minutes ("600s") to 24 hours - // ("86400s"). The timer begins when the first job is submitted. If the - // workflow is running at the end of the timeout period, any remaining jobs - // are cancelled, the workflow is ended, and if the workflow was running on a - // [managed - // cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), - // the cluster is deleted. - google.protobuf.Duration dag_timeout = 10 [(google.api.field_behavior) = OPTIONAL]; -} - -// Specifies workflow execution target. -// -// Either `managed_cluster` or `cluster_selector` is required. -message WorkflowTemplatePlacement { - // Required. Specifies where workflow executes; either on a managed - // cluster or an existing cluster chosen by labels. - oneof placement { - // A cluster that is managed by the workflow. - ManagedCluster managed_cluster = 1; - - // Optional. A selector that chooses target cluster for jobs based - // on metadata. - // - // The selector is evaluated at the time each job is submitted. - ClusterSelector cluster_selector = 2; - } -} - -// Cluster that is managed by the workflow. -message ManagedCluster { - // Required. 
The cluster name prefix. A unique cluster name will be formed by - // appending a random suffix. - // - // The name must contain only lower-case letters (a-z), numbers (0-9), - // and hyphens (-). Must begin with a letter. Cannot begin or end with - // hyphen. Must consist of between 2 and 35 characters. - string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The cluster configuration. - ClusterConfig config = 3 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The labels to associate with this cluster. - // - // Label keys must be between 1 and 63 characters long, and must conform to - // the following PCRE regular expression: - // [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - // - // Label values must be between 1 and 63 characters long, and must conform to - // the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - // - // No more than 32 labels can be associated with a given cluster. - map labels = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// A selector that chooses target cluster for jobs based on metadata. -message ClusterSelector { - // Optional. The zone where workflow process executes. This parameter does not - // affect the selection of the cluster. - // - // If unspecified, the zone of the first cluster matching the selector - // is used. - string zone = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Required. The cluster labels. Cluster must have all labels - // to match. - map cluster_labels = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// A job executed by the workflow. -message OrderedJob { - // Required. The step id. The id must be unique among all jobs - // within the template. - // - // The step id is used as prefix for job id, as job - // `goog-dataproc-workflow-step-id` label, and in - // [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - // steps. 
- // - // The id must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). Cannot begin or end with underscore - // or hyphen. Must consist of between 3 and 50 characters. - string step_id = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The job definition. - oneof job_type { - // Optional. Job is a Hadoop job. - HadoopJob hadoop_job = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a Spark job. - SparkJob spark_job = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a PySpark job. - PySparkJob pyspark_job = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a Hive job. - HiveJob hive_job = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a Pig job. - PigJob pig_job = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a SparkR job. - SparkRJob spark_r_job = 11 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a SparkSql job. - SparkSqlJob spark_sql_job = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job is a Presto job. - PrestoJob presto_job = 12 [(google.api.field_behavior) = OPTIONAL]; - } - - // Optional. The labels to associate with this job. - // - // Label keys must be between 1 and 63 characters long, and must conform to - // the following regular expression: - // [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - // - // Label values must be between 1 and 63 characters long, and must conform to - // the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - // - // No more than 32 labels can be associated with a given job. - map labels = 8 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Job scheduling configuration. - JobScheduling scheduling = 9 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The optional list of prerequisite job step_ids. - // If not specified, the job will start at the beginning of workflow. 
- repeated string prerequisite_step_ids = 10 [(google.api.field_behavior) = OPTIONAL]; -} - -// A configurable parameter that replaces one or more fields in the template. -// Parameterizable fields: -// - Labels -// - File uris -// - Job properties -// - Job arguments -// - Script variables -// - Main class (in HadoopJob and SparkJob) -// - Zone (in ClusterSelector) -message TemplateParameter { - // Required. Parameter name. - // The parameter name is used as the key, and paired with the - // parameter value, which are passed to the template when the template - // is instantiated. - // The name must contain only capital letters (A-Z), numbers (0-9), and - // underscores (_), and must not start with a number. The maximum length is - // 40 characters. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. Paths to all fields that the parameter replaces. - // A field is allowed to appear in at most one parameter's list of field - // paths. - // - // A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask]. - // For example, a field path that references the zone field of a workflow - // template's cluster selector would be specified as - // `placement.clusterSelector.zone`. 
- // - // Also, field paths can reference fields using the following syntax: - // - // * Values in maps can be referenced by key: - // * labels['key'] - // * placement.clusterSelector.clusterLabels['key'] - // * placement.managedCluster.labels['key'] - // * placement.clusterSelector.clusterLabels['key'] - // * jobs['step-id'].labels['key'] - // - // * Jobs in the jobs list can be referenced by step-id: - // * jobs['step-id'].hadoopJob.mainJarFileUri - // * jobs['step-id'].hiveJob.queryFileUri - // * jobs['step-id'].pySparkJob.mainPythonFileUri - // * jobs['step-id'].hadoopJob.jarFileUris[0] - // * jobs['step-id'].hadoopJob.archiveUris[0] - // * jobs['step-id'].hadoopJob.fileUris[0] - // * jobs['step-id'].pySparkJob.pythonFileUris[0] - // - // * Items in repeated fields can be referenced by a zero-based index: - // * jobs['step-id'].sparkJob.args[0] - // - // * Other examples: - // * jobs['step-id'].hadoopJob.properties['key'] - // * jobs['step-id'].hadoopJob.args[0] - // * jobs['step-id'].hiveJob.scriptVariables['key'] - // * jobs['step-id'].hadoopJob.mainJarFileUri - // * placement.clusterSelector.zone - // - // It may not be possible to parameterize maps and repeated fields in their - // entirety since only individual map values and individual items in repeated - // fields can be referenced. For example, the following field paths are - // invalid: - // - // - placement.clusterSelector.clusterLabels - // - jobs['step-id'].sparkJob.args - repeated string fields = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Brief description of the parameter. - // Must not exceed 1024 characters. - string description = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Validation rules to be applied to this parameter's value. - ParameterValidation validation = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// Configuration for parameter validation. -message ParameterValidation { - // Required. The type of validation to be performed. 
- oneof validation_type { - // Validation based on regular expressions. - RegexValidation regex = 1; - - // Validation based on a list of allowed values. - ValueValidation values = 2; - } -} - -// Validation based on regular expressions. -message RegexValidation { - // Required. RE2 regular expressions used to validate the parameter's value. - // The value must match the regex in its entirety (substring - // matches are not sufficient). - repeated string regexes = 1 [(google.api.field_behavior) = REQUIRED]; -} - -// Validation based on a list of allowed values. -message ValueValidation { - // Required. List of allowed values for the parameter. - repeated string values = 1 [(google.api.field_behavior) = REQUIRED]; -} - -// A Dataproc workflow template resource. -message WorkflowMetadata { - // The operation state. - enum State { - // Unused. - UNKNOWN = 0; - - // The operation has been created. - PENDING = 1; - - // The operation is running. - RUNNING = 2; - - // The operation is done; either cancelled or completed. - DONE = 3; - } - - // Output only. The resource name of the workflow template as described - // in https://cloud.google.com/apis/design/resource_names. - // - // * For `projects.regions.workflowTemplates`, the resource name of the - // template has the following format: - // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - // - // * For `projects.locations.workflowTemplates`, the resource name of the - // template has the following format: - // `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - string template = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The version of template at the time of - // workflow instantiation. - int32 version = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The create cluster operation metadata. - ClusterOperation create_cluster = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The workflow graph. 
- WorkflowGraph graph = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The delete cluster operation metadata. - ClusterOperation delete_cluster = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The workflow state. - State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The name of the target cluster. - string cluster_name = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Map from parameter names to values that were used for those parameters. - map parameters = 8; - - // Output only. Workflow start time. - google.protobuf.Timestamp start_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Workflow end time. - google.protobuf.Timestamp end_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The UUID of target cluster. - string cluster_uuid = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The timeout duration for the DAG of jobs, expressed in seconds (see - // [JSON representation of - // duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Duration dag_timeout = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. DAG start time, only set for workflows with [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when DAG - // begins. - google.protobuf.Timestamp dag_start_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. DAG end time, only set for workflows with [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when DAG ends. - google.protobuf.Timestamp dag_end_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// The cluster operation triggered by a workflow. -message ClusterOperation { - // Output only. The id of the cluster operation. - string operation_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Error, if operation failed. 
- string error = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Indicates the operation is done. - bool done = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// The workflow graph. -message WorkflowGraph { - // Output only. The workflow nodes. - repeated WorkflowNode nodes = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// The workflow node. -message WorkflowNode { - // The workflow node state. - enum NodeState { - // State is unspecified. - NODE_STATE_UNSPECIFIED = 0; - - // The node is awaiting prerequisite node to finish. - BLOCKED = 1; - - // The node is runnable but not running. - RUNNABLE = 2; - - // The node is running. - RUNNING = 3; - - // The node completed successfully. - COMPLETED = 4; - - // The node failed. A node can be marked FAILED because - // its ancestor or peer failed. - FAILED = 5; - } - - // Output only. The name of the node. - string step_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Node's prerequisite nodes. - repeated string prerequisite_step_ids = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The job id; populated after the node enters RUNNING state. - string job_id = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The node state. - NodeState state = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The error detail. - string error = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// A request to create a workflow template. -message CreateWorkflowTemplateRequest { - // Required. The resource name of the region or location, as described - // in https://cloud.google.com/apis/design/resource_names. 
- // - // * For `projects.regions.workflowTemplates.create`, the resource name of the - // region has the following format: - // `projects/{project_id}/regions/{region}` - // - // * For `projects.locations.workflowTemplates.create`, the resource name of - // the location has the following format: - // `projects/{project_id}/locations/{location}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "dataproc.googleapis.com/WorkflowTemplate" - } - ]; - - // Required. The Dataproc workflow template to create. - WorkflowTemplate template = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// A request to fetch a workflow template. -message GetWorkflowTemplateRequest { - // Required. The resource name of the workflow template, as described - // in https://cloud.google.com/apis/design/resource_names. - // - // * For `projects.regions.workflowTemplates.get`, the resource name of the - // template has the following format: - // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - // - // * For `projects.locations.workflowTemplates.get`, the resource name of the - // template has the following format: - // `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "dataproc.googleapis.com/WorkflowTemplate" - } - ]; - - // Optional. The version of workflow template to retrieve. Only previously - // instantiated versions can be retrieved. - // - // If unspecified, retrieves the current version. - int32 version = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to instantiate a workflow template. -message InstantiateWorkflowTemplateRequest { - // Required. The resource name of the workflow template, as described - // in https://cloud.google.com/apis/design/resource_names. 
- // - // * For `projects.regions.workflowTemplates.instantiate`, the resource name - // of the template has the following format: - // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - // - // * For `projects.locations.workflowTemplates.instantiate`, the resource name - // of the template has the following format: - // `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "dataproc.googleapis.com/WorkflowTemplate" - } - ]; - - // Optional. The version of workflow template to instantiate. If specified, - // the workflow will be instantiated only if the current version of - // the workflow template has the supplied version. - // - // This option cannot be used to instantiate a previous version of - // workflow template. - int32 version = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A tag that prevents multiple concurrent workflow - // instances with the same tag from running. This mitigates risk of - // concurrent instances started due to retries. - // - // It is recommended to always set this value to a - // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - // - // The tag must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). The maximum length is 40 characters. - string request_id = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Map from parameter names to values that should be used for those - // parameters. Values may not exceed 1000 characters. - map parameters = 6 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to instantiate an inline workflow template. -message InstantiateInlineWorkflowTemplateRequest { - // Required. The resource name of the region or location, as described - // in https://cloud.google.com/apis/design/resource_names. 
- // - // * For `projects.regions.workflowTemplates,instantiateinline`, the resource - // name of the region has the following format: - // `projects/{project_id}/regions/{region}` - // - // * For `projects.locations.workflowTemplates.instantiateinline`, the - // resource name of the location has the following format: - // `projects/{project_id}/locations/{location}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "dataproc.googleapis.com/WorkflowTemplate" - } - ]; - - // Required. The workflow template to instantiate. - WorkflowTemplate template = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. A tag that prevents multiple concurrent workflow - // instances with the same tag from running. This mitigates risk of - // concurrent instances started due to retries. - // - // It is recommended to always set this value to a - // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - // - // The tag must contain only letters (a-z, A-Z), numbers (0-9), - // underscores (_), and hyphens (-). The maximum length is 40 characters. - string request_id = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// A request to update a workflow template. -message UpdateWorkflowTemplateRequest { - // Required. The updated workflow template. - // - // The `template.version` field must match the current version. - WorkflowTemplate template = 1 [(google.api.field_behavior) = REQUIRED]; -} - -// A request to list workflow templates in a project. -message ListWorkflowTemplatesRequest { - // Required. The resource name of the region or location, as described - // in https://cloud.google.com/apis/design/resource_names. 
- // - // * For `projects.regions.workflowTemplates,list`, the resource - // name of the region has the following format: - // `projects/{project_id}/regions/{region}` - // - // * For `projects.locations.workflowTemplates.list`, the - // resource name of the location has the following format: - // `projects/{project_id}/locations/{location}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "dataproc.googleapis.com/WorkflowTemplate" - } - ]; - - // Optional. The maximum number of results to return in each response. - int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The page token, returned by a previous call, to request the - // next page of results. - string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// A response to a request to list workflow templates in a project. -message ListWorkflowTemplatesResponse { - // Output only. WorkflowTemplates list. - repeated WorkflowTemplate templates = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. This token is included in the response if there are more - // results to fetch. To fetch additional results, provide this value as the - // page_token in a subsequent ListWorkflowTemplatesRequest. - string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// A request to delete a workflow template. -// -// Currently started workflows will remain running. -message DeleteWorkflowTemplateRequest { - // Required. The resource name of the workflow template, as described - // in https://cloud.google.com/apis/design/resource_names. 
- // - // * For `projects.regions.workflowTemplates.delete`, the resource name - // of the template has the following format: - // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - // - // * For `projects.locations.workflowTemplates.instantiate`, the resource name - // of the template has the following format: - // `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "dataproc.googleapis.com/WorkflowTemplate" - } - ]; - - // Optional. The version of workflow template to delete. If specified, - // will only delete the template if the current server version matches - // specified version. - int32 version = 2 [(google.api.field_behavior) = OPTIONAL]; -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.create_autoscaling_policy.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.create_autoscaling_policy.js deleted file mode 100644 index 48edf447c8e..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.create_autoscaling_policy.js +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(parent, policy) { - // [START dataproc_v1_generated_AutoscalingPolicyService_CreateAutoscalingPolicy_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The "resource name" of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * * For `projects.regions.autoscalingPolicies.create`, the resource name - * of the region has the following format: - * `projects/{project_id}/regions/{region}` - * * For `projects.locations.autoscalingPolicies.create`, the resource name - * of the location has the following format: - * `projects/{project_id}/locations/{location}` - */ - // const parent = 'abc123' - /** - * Required. The autoscaling policy to create. 
- */ - // const policy = {} - - // Imports the Dataproc library - const {AutoscalingPolicyServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new AutoscalingPolicyServiceClient(); - - async function callCreateAutoscalingPolicy() { - // Construct request - const request = { - parent, - policy, - }; - - // Run request - const response = await dataprocClient.createAutoscalingPolicy(request); - console.log(response); - } - - callCreateAutoscalingPolicy(); - // [END dataproc_v1_generated_AutoscalingPolicyService_CreateAutoscalingPolicy_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.delete_autoscaling_policy.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.delete_autoscaling_policy.js deleted file mode 100644 index 3a3cbe273f1..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.delete_autoscaling_policy.js +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(name) { - // [START dataproc_v1_generated_AutoscalingPolicyService_DeleteAutoscalingPolicy_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The "resource name" of the autoscaling policy, as described - * in https://cloud.google.com/apis/design/resource_names. - * * For `projects.regions.autoscalingPolicies.delete`, the resource name - * of the policy has the following format: - * `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` - * * For `projects.locations.autoscalingPolicies.delete`, the resource name - * of the policy has the following format: - * `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` - */ - // const name = 'abc123' - - // Imports the Dataproc library - const {AutoscalingPolicyServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new AutoscalingPolicyServiceClient(); - - async function callDeleteAutoscalingPolicy() { - // Construct request - const request = { - name, - }; - - // Run request - const response = await dataprocClient.deleteAutoscalingPolicy(request); - console.log(response); - } - - callDeleteAutoscalingPolicy(); - // [END dataproc_v1_generated_AutoscalingPolicyService_DeleteAutoscalingPolicy_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.get_autoscaling_policy.js 
b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.get_autoscaling_policy.js deleted file mode 100644 index a441db5a600..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.get_autoscaling_policy.js +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(name) { - // [START dataproc_v1_generated_AutoscalingPolicyService_GetAutoscalingPolicy_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The "resource name" of the autoscaling policy, as described - * in https://cloud.google.com/apis/design/resource_names. 
- * * For `projects.regions.autoscalingPolicies.get`, the resource name - * of the policy has the following format: - * `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` - * * For `projects.locations.autoscalingPolicies.get`, the resource name - * of the policy has the following format: - * `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` - */ - // const name = 'abc123' - - // Imports the Dataproc library - const {AutoscalingPolicyServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new AutoscalingPolicyServiceClient(); - - async function callGetAutoscalingPolicy() { - // Construct request - const request = { - name, - }; - - // Run request - const response = await dataprocClient.getAutoscalingPolicy(request); - console.log(response); - } - - callGetAutoscalingPolicy(); - // [END dataproc_v1_generated_AutoscalingPolicyService_GetAutoscalingPolicy_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.list_autoscaling_policies.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.list_autoscaling_policies.js deleted file mode 100644 index 95e066e414c..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.list_autoscaling_policies.js +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(parent) { - // [START dataproc_v1_generated_AutoscalingPolicyService_ListAutoscalingPolicies_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The "resource name" of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * * For `projects.regions.autoscalingPolicies.list`, the resource name - * of the region has the following format: - * `projects/{project_id}/regions/{region}` - * * For `projects.locations.autoscalingPolicies.list`, the resource name - * of the location has the following format: - * `projects/{project_id}/locations/{location}` - */ - // const parent = 'abc123' - /** - * Optional. The maximum number of results to return in each response. - * Must be less than or equal to 1000. Defaults to 100. - */ - // const pageSize = 1234 - /** - * Optional. The page token, returned by a previous call, to request the - * next page of results. 
- */ - // const pageToken = 'abc123' - - // Imports the Dataproc library - const {AutoscalingPolicyServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new AutoscalingPolicyServiceClient(); - - async function callListAutoscalingPolicies() { - // Construct request - const request = { - parent, - }; - - // Run request - const iterable = await dataprocClient.listAutoscalingPoliciesAsync(request); - for await (const response of iterable) { - console.log(response); - } - } - - callListAutoscalingPolicies(); - // [END dataproc_v1_generated_AutoscalingPolicyService_ListAutoscalingPolicies_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.update_autoscaling_policy.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.update_autoscaling_policy.js deleted file mode 100644 index fe51b21bf7d..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/autoscaling_policy_service.update_autoscaling_policy.js +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(policy) { - // [START dataproc_v1_generated_AutoscalingPolicyService_UpdateAutoscalingPolicy_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The updated autoscaling policy. - */ - // const policy = {} - - // Imports the Dataproc library - const {AutoscalingPolicyServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new AutoscalingPolicyServiceClient(); - - async function callUpdateAutoscalingPolicy() { - // Construct request - const request = { - policy, - }; - - // Run request - const response = await dataprocClient.updateAutoscalingPolicy(request); - console.log(response); - } - - callUpdateAutoscalingPolicy(); - // [END dataproc_v1_generated_AutoscalingPolicyService_UpdateAutoscalingPolicy_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.create_batch.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.create_batch.js deleted file mode 100644 index d87aff89595..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.create_batch.js +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(parent, batch) { - // [START dataproc_v1_generated_BatchController_CreateBatch_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The parent resource where this batch will be created. - */ - // const parent = 'abc123' - /** - * Required. The batch to create. - */ - // const batch = {} - /** - * Optional. The ID to use for the batch, which will become the final component of - * the batch's resource name. - * This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`. - */ - // const batchId = 'abc123' - /** - * Optional. A unique ID used to identify the request. If the service - * receives two - * CreateBatchRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s - * with the same request_id, the second request is ignored and the - * Operation that corresponds to the first Batch created and stored - * in the backend is returned. - * Recommendation: Set this value to a - * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). 
- * The value must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - */ - // const requestId = 'abc123' - - // Imports the Dataproc library - const {BatchControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new BatchControllerClient(); - - async function callCreateBatch() { - // Construct request - const request = { - parent, - batch, - }; - - // Run request - const [operation] = await dataprocClient.createBatch(request); - const [response] = await operation.promise(); - console.log(response); - } - - callCreateBatch(); - // [END dataproc_v1_generated_BatchController_CreateBatch_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.delete_batch.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.delete_batch.js deleted file mode 100644 index d2422e49e04..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.delete_batch.js +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(name) { - // [START dataproc_v1_generated_BatchController_DeleteBatch_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The name of the batch resource to delete. - */ - // const name = 'abc123' - - // Imports the Dataproc library - const {BatchControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new BatchControllerClient(); - - async function callDeleteBatch() { - // Construct request - const request = { - name, - }; - - // Run request - const response = await dataprocClient.deleteBatch(request); - console.log(response); - } - - callDeleteBatch(); - // [END dataproc_v1_generated_BatchController_DeleteBatch_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.get_batch.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.get_batch.js deleted file mode 100644 index f8e697e1b68..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.get_batch.js +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(name) { - // [START dataproc_v1_generated_BatchController_GetBatch_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The name of the batch to retrieve. 
- */ - // const name = 'abc123' - - // Imports the Dataproc library - const {BatchControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new BatchControllerClient(); - - async function callGetBatch() { - // Construct request - const request = { - name, - }; - - // Run request - const response = await dataprocClient.getBatch(request); - console.log(response); - } - - callGetBatch(); - // [END dataproc_v1_generated_BatchController_GetBatch_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.list_batches.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.list_batches.js deleted file mode 100644 index 1255756c4f7..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/batch_controller.list_batches.js +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - - -'use strict'; - -function main(parent) { - // [START dataproc_v1_generated_BatchController_ListBatches_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The parent, which owns this collection of batches. - */ - // const parent = 'abc123' - /** - * Optional. The maximum number of batches to return in each response. - * The service may return fewer than this value. - * The default page size is 20; the maximum page size is 1000. - */ - // const pageSize = 1234 - /** - * Optional. A page token received from a previous `ListBatches` call. - * Provide this token to retrieve the subsequent page. - */ - // const pageToken = 'abc123' - - // Imports the Dataproc library - const {BatchControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new BatchControllerClient(); - - async function callListBatches() { - // Construct request - const request = { - parent, - }; - - // Run request - const iterable = await dataprocClient.listBatchesAsync(request); - for await (const response of iterable) { - console.log(response); - } - } - - callListBatches(); - // [END dataproc_v1_generated_BatchController_ListBatches_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.create_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.create_cluster.js deleted file mode 100644 index 23d696ca9de..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.create_cluster.js +++ /dev/null @@ -1,90 +0,0 
@@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(projectId, region, cluster) { - // [START dataproc_v1_generated_ClusterController_CreateCluster_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The cluster to create. - */ - // const cluster = {} - /** - * Optional. A unique ID used to identify the request. If the server receives - * two - * CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s - * with the same id, then the second request will be ignored and the - * first google.longrunning.Operation google.longrunning.Operation created - * and stored in the backend is returned. 
- * It is recommended to always set this value to a - * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - */ - // const requestId = 'abc123' - /** - * Optional. Failure action when primary worker creation fails. - */ - // const actionOnFailedPrimaryWorkers = {} - - // Imports the Dataproc library - const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new ClusterControllerClient(); - - async function callCreateCluster() { - // Construct request - const request = { - projectId, - region, - cluster, - }; - - // Run request - const [operation] = await dataprocClient.createCluster(request); - const [response] = await operation.promise(); - console.log(response); - } - - callCreateCluster(); - // [END dataproc_v1_generated_ClusterController_CreateCluster_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.delete_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.delete_cluster.js deleted file mode 100644 index 0105fcb45ab..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.delete_cluster.js +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(projectId, region, clusterName) { - // [START dataproc_v1_generated_ClusterController_DeleteCluster_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The cluster name. - */ - // const clusterName = 'abc123' - /** - * Optional. Specifying the `cluster_uuid` means the RPC should fail - * (with error NOT_FOUND) if cluster with specified UUID does not exist. - */ - // const clusterUuid = 'abc123' - /** - * Optional. A unique ID used to identify the request. 
If the server - * receives two - * DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s - * with the same id, then the second request will be ignored and the - * first google.longrunning.Operation google.longrunning.Operation created - * and stored in the backend is returned. - * It is recommended to always set this value to a - * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - */ - // const requestId = 'abc123' - - // Imports the Dataproc library - const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new ClusterControllerClient(); - - async function callDeleteCluster() { - // Construct request - const request = { - projectId, - region, - clusterName, - }; - - // Run request - const [operation] = await dataprocClient.deleteCluster(request); - const [response] = await operation.promise(); - console.log(response); - } - - callDeleteCluster(); - // [END dataproc_v1_generated_ClusterController_DeleteCluster_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.diagnose_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.diagnose_cluster.js deleted file mode 100644 index 4dc33eca599..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.diagnose_cluster.js +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(projectId, region, clusterName) { - // [START dataproc_v1_generated_ClusterController_DiagnoseCluster_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The cluster name. 
- */ - // const clusterName = 'abc123' - - // Imports the Dataproc library - const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new ClusterControllerClient(); - - async function callDiagnoseCluster() { - // Construct request - const request = { - projectId, - region, - clusterName, - }; - - // Run request - const [operation] = await dataprocClient.diagnoseCluster(request); - const [response] = await operation.promise(); - console.log(response); - } - - callDiagnoseCluster(); - // [END dataproc_v1_generated_ClusterController_DiagnoseCluster_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.get_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.get_cluster.js deleted file mode 100644 index d2cb65227f8..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.get_cluster.js +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - - -'use strict'; - -function main(projectId, region, clusterName) { - // [START dataproc_v1_generated_ClusterController_GetCluster_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The cluster name. - */ - // const clusterName = 'abc123' - - // Imports the Dataproc library - const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new ClusterControllerClient(); - - async function callGetCluster() { - // Construct request - const request = { - projectId, - region, - clusterName, - }; - - // Run request - const response = await dataprocClient.getCluster(request); - console.log(response); - } - - callGetCluster(); - // [END dataproc_v1_generated_ClusterController_GetCluster_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.list_clusters.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.list_clusters.js deleted file mode 100644 index aaadff9f99b..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.list_clusters.js +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(projectId, region) { - // [START dataproc_v1_generated_ClusterController_ListClusters_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Optional. A filter constraining the clusters to list. Filters are - * case-sensitive and have the following syntax: - * field = value AND field = value ... - * where **field** is one of `status.state`, `clusterName`, or `labels.KEY`, - * and `[KEY]` is a label key. **value** can be `*` to match all values. - * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, - * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` - * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` - * contains the `DELETING` and `ERROR` states. - * `clusterName` is the name of the cluster provided at creation time. 
- * Only the logical `AND` operator is supported; space-separated items are - * treated as having an implicit `AND` operator. - * Example filter: - * status.state = ACTIVE AND clusterName = mycluster - * AND labels.env = staging AND labels.starred = * - */ - // const filter = 'abc123' - /** - * Optional. The standard List page size. - */ - // const pageSize = 1234 - /** - * Optional. The standard List page token. - */ - // const pageToken = 'abc123' - - // Imports the Dataproc library - const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new ClusterControllerClient(); - - async function callListClusters() { - // Construct request - const request = { - projectId, - region, - }; - - // Run request - const iterable = await dataprocClient.listClustersAsync(request); - for await (const response of iterable) { - console.log(response); - } - } - - callListClusters(); - // [END dataproc_v1_generated_ClusterController_ListClusters_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.start_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.start_cluster.js deleted file mode 100644 index b79b9108271..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.start_cluster.js +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(projectId, region, clusterName) { - // [START dataproc_v1_generated_ClusterController_StartCluster_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project the - * cluster belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The cluster name. - */ - // const clusterName = 'abc123' - /** - * Optional. Specifying the `cluster_uuid` means the RPC will fail - * (with error NOT_FOUND) if a cluster with the specified UUID does not exist. - */ - // const clusterUuid = 'abc123' - /** - * Optional. A unique ID used to identify the request. 
If the server - * receives two - * StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s - * with the same id, then the second request will be ignored and the - * first google.longrunning.Operation google.longrunning.Operation created - * and stored in the backend is returned. - * Recommendation: Set this value to a - * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - */ - // const requestId = 'abc123' - - // Imports the Dataproc library - const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new ClusterControllerClient(); - - async function callStartCluster() { - // Construct request - const request = { - projectId, - region, - clusterName, - }; - - // Run request - const [operation] = await dataprocClient.startCluster(request); - const [response] = await operation.promise(); - console.log(response); - } - - callStartCluster(); - // [END dataproc_v1_generated_ClusterController_StartCluster_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.stop_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.stop_cluster.js deleted file mode 100644 index c3cf29a9dd3..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.stop_cluster.js +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(projectId, region, clusterName) { - // [START dataproc_v1_generated_ClusterController_StopCluster_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project the - * cluster belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The cluster name. - */ - // const clusterName = 'abc123' - /** - * Optional. Specifying the `cluster_uuid` means the RPC will fail - * (with error NOT_FOUND) if a cluster with the specified UUID does not exist. - */ - // const clusterUuid = 'abc123' - /** - * Optional. A unique ID used to identify the request. 
If the server - * receives two - * StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s - * with the same id, then the second request will be ignored and the - * first google.longrunning.Operation google.longrunning.Operation created - * and stored in the backend is returned. - * Recommendation: Set this value to a - * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - */ - // const requestId = 'abc123' - - // Imports the Dataproc library - const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new ClusterControllerClient(); - - async function callStopCluster() { - // Construct request - const request = { - projectId, - region, - clusterName, - }; - - // Run request - const [operation] = await dataprocClient.stopCluster(request); - const [response] = await operation.promise(); - console.log(response); - } - - callStopCluster(); - // [END dataproc_v1_generated_ClusterController_StopCluster_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.update_cluster.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.update_cluster.js deleted file mode 100644 index 13faa39caa3..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/cluster_controller.update_cluster.js +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(projectId, region, clusterName, cluster, updateMask) { - // [START dataproc_v1_generated_ClusterController_UpdateCluster_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project the - * cluster belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The cluster name. - */ - // const clusterName = 'abc123' - /** - * Required. The changes to the cluster. - */ - // const cluster = {} - /** - * Optional. Timeout for graceful YARN decomissioning. Graceful - * decommissioning allows removing nodes from the cluster without - * interrupting jobs in progress. Timeout specifies how long to wait for jobs - * in progress to finish before forcefully removing nodes (and potentially - * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. 
(see JSON representation of - * Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). - * Only supported on Dataproc image versions 1.2 and higher. - */ - // const gracefulDecommissionTimeout = {} - /** - * Required. Specifies the path, relative to `Cluster`, of - * the field to update. For example, to change the number of workers - * in a cluster to 5, the `update_mask` parameter would be - * specified as `config.worker_config.num_instances`, - * and the `PATCH` request body would specify the new value, as follows: - * { - * "config":{ - * "workerConfig":{ - * "numInstances":"5" - * } - * } - * } - * Similarly, to change the number of preemptible workers in a cluster to 5, - * the `update_mask` parameter would be - * `config.secondary_worker_config.num_instances`, and the `PATCH` request - * body would be set as follows: - * { - * "config":{ - * "secondaryWorkerConfig":{ - * "numInstances":"5" - * } - * } - * } - * Note: Currently, only the following fields can be updated: - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - *
MaskPurpose
labelsUpdate labels
config.worker_config.num_instancesResize primary worker group
config.secondary_worker_config.num_instancesResize secondary worker group
config.autoscaling_config.policy_uriUse, stop using, or - * change autoscaling policies
- */ - // const updateMask = {} - /** - * Optional. A unique ID used to identify the request. If the server - * receives two - * UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s - * with the same id, then the second request will be ignored and the - * first google.longrunning.Operation google.longrunning.Operation created - * and stored in the backend is returned. - * It is recommended to always set this value to a - * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - */ - // const requestId = 'abc123' - - // Imports the Dataproc library - const {ClusterControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new ClusterControllerClient(); - - async function callUpdateCluster() { - // Construct request - const request = { - projectId, - region, - clusterName, - cluster, - updateMask, - }; - - // Run request - const [operation] = await dataprocClient.updateCluster(request); - const [response] = await operation.promise(); - console.log(response); - } - - callUpdateCluster(); - // [END dataproc_v1_generated_ClusterController_UpdateCluster_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.cancel_job.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.cancel_job.js deleted file mode 100644 index 3ef3b314c53..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.cancel_job.js +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// 
you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(projectId, region, jobId) { - // [START dataproc_v1_generated_JobController_CancelJob_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The job ID. 
- */ - // const jobId = 'abc123' - - // Imports the Dataproc library - const {JobControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new JobControllerClient(); - - async function callCancelJob() { - // Construct request - const request = { - projectId, - region, - jobId, - }; - - // Run request - const response = await dataprocClient.cancelJob(request); - console.log(response); - } - - callCancelJob(); - // [END dataproc_v1_generated_JobController_CancelJob_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.delete_job.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.delete_job.js deleted file mode 100644 index a77ccb12112..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.delete_job.js +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - - -'use strict'; - -function main(projectId, region, jobId) { - // [START dataproc_v1_generated_JobController_DeleteJob_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The job ID. - */ - // const jobId = 'abc123' - - // Imports the Dataproc library - const {JobControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new JobControllerClient(); - - async function callDeleteJob() { - // Construct request - const request = { - projectId, - region, - jobId, - }; - - // Run request - const response = await dataprocClient.deleteJob(request); - console.log(response); - } - - callDeleteJob(); - // [END dataproc_v1_generated_JobController_DeleteJob_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.get_job.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.get_job.js deleted file mode 100644 index 4f45a6ee8e6..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.get_job.js +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(projectId, region, jobId) { - // [START dataproc_v1_generated_JobController_GetJob_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The job ID. 
- */ - // const jobId = 'abc123' - - // Imports the Dataproc library - const {JobControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new JobControllerClient(); - - async function callGetJob() { - // Construct request - const request = { - projectId, - region, - jobId, - }; - - // Run request - const response = await dataprocClient.getJob(request); - console.log(response); - } - - callGetJob(); - // [END dataproc_v1_generated_JobController_GetJob_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.list_jobs.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.list_jobs.js deleted file mode 100644 index 7b746d83701..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.list_jobs.js +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - - -'use strict'; - -function main(projectId, region) { - // [START dataproc_v1_generated_JobController_ListJobs_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Optional. The number of results to return in each response. - */ - // const pageSize = 1234 - /** - * Optional. The page token, returned by a previous call, to request the - * next page of results. - */ - // const pageToken = 'abc123' - /** - * Optional. If set, the returned jobs list includes only jobs that were - * submitted to the named cluster. - */ - // const clusterName = 'abc123' - /** - * Optional. Specifies enumerated categories of jobs to list. - * (default = match ALL jobs). - * If `filter` is provided, `jobStateMatcher` will be ignored. - */ - // const jobStateMatcher = {} - /** - * Optional. A filter constraining the jobs to list. Filters are - * case-sensitive and have the following syntax: - * field = value AND field = value ... - * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label - * key. **value** can be `*` to match all values. - * `status.state` can be either `ACTIVE` or `NON_ACTIVE`. - * Only the logical `AND` operator is supported; space-separated items are - * treated as having an implicit `AND` operator. 
- * Example filter: - * status.state = ACTIVE AND labels.env = staging AND labels.starred = * - */ - // const filter = 'abc123' - - // Imports the Dataproc library - const {JobControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new JobControllerClient(); - - async function callListJobs() { - // Construct request - const request = { - projectId, - region, - }; - - // Run request - const iterable = await dataprocClient.listJobsAsync(request); - for await (const response of iterable) { - console.log(response); - } - } - - callListJobs(); - // [END dataproc_v1_generated_JobController_ListJobs_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job.js deleted file mode 100644 index 10d9ecdabf9..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job.js +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - - -'use strict'; - -function main(projectId, region, job) { - // [START dataproc_v1_generated_JobController_SubmitJob_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The job resource. - */ - // const job = {} - /** - * Optional. A unique id used to identify the request. If the server - * receives two - * SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s - * with the same id, then the second request will be ignored and the - * first Job google.cloud.dataproc.v1.Job created and stored in the backend - * is returned. - * It is recommended to always set this value to a - * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). - * The id must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. 
- */ - // const requestId = 'abc123' - - // Imports the Dataproc library - const {JobControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new JobControllerClient(); - - async function callSubmitJob() { - // Construct request - const request = { - projectId, - region, - job, - }; - - // Run request - const response = await dataprocClient.submitJob(request); - console.log(response); - } - - callSubmitJob(); - // [END dataproc_v1_generated_JobController_SubmitJob_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job_as_operation.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job_as_operation.js deleted file mode 100644 index 9bf24c7d4cc..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.submit_job_as_operation.js +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - - -'use strict'; - -function main(projectId, region, job) { - // [START dataproc_v1_generated_JobController_SubmitJobAsOperation_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The job resource. - */ - // const job = {} - /** - * Optional. A unique id used to identify the request. If the server - * receives two - * SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s - * with the same id, then the second request will be ignored and the - * first Job google.cloud.dataproc.v1.Job created and stored in the backend - * is returned. - * It is recommended to always set this value to a - * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). - * The id must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. 
- */ - // const requestId = 'abc123' - - // Imports the Dataproc library - const {JobControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new JobControllerClient(); - - async function callSubmitJobAsOperation() { - // Construct request - const request = { - projectId, - region, - job, - }; - - // Run request - const [operation] = await dataprocClient.submitJobAsOperation(request); - const [response] = await operation.promise(); - console.log(response); - } - - callSubmitJobAsOperation(); - // [END dataproc_v1_generated_JobController_SubmitJobAsOperation_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.update_job.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.update_job.js deleted file mode 100644 index 6c8a77f8b39..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/job_controller.update_job.js +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - - -'use strict'; - -function main(projectId, region, jobId, job, updateMask) { - // [START dataproc_v1_generated_JobController_UpdateJob_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - */ - // const projectId = 'abc123' - /** - * Required. The Dataproc region in which to handle the request. - */ - // const region = 'us-central1' - /** - * Required. The job ID. - */ - // const jobId = 'abc123' - /** - * Required. The changes to the job. - */ - // const job = {} - /** - * Required. Specifies the path, relative to Job, of - * the field to update. For example, to update the labels of a Job the - * update_mask parameter would be specified as - * labels, and the `PATCH` request body would specify the new - * value. Note: Currently, labels is the only - * field that can be updated. 
- */ - // const updateMask = {} - - // Imports the Dataproc library - const {JobControllerClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new JobControllerClient(); - - async function callUpdateJob() { - // Construct request - const request = { - projectId, - region, - jobId, - job, - updateMask, - }; - - // Run request - const response = await dataprocClient.updateJob(request); - console.log(response); - } - - callUpdateJob(); - // [END dataproc_v1_generated_JobController_UpdateJob_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json deleted file mode 100644 index 64e7abc2fb7..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json +++ /dev/null @@ -1,1679 +0,0 @@ -{ - "clientLibrary": { - "name": "nodejs-dataproc", - "version": "0.1.0", - "language": "TYPESCRIPT", - "apis": [ - { - "id": "google.cloud.dataproc.v1", - "version": "v1" - } - ] - }, - "snippets": [ - { - "regionTag": "dataproc_v1_generated_AutoscalingPolicyService_CreateAutoscalingPolicy_async", - "title": "dataproc createAutoscalingPolicy Sample", - "origin": "API_DEFINITION", - "description": " Creates new autoscaling policy.", - "canonical": true, - "file": "autoscaling_policy_service.create_autoscaling_policy.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 65, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "CreateAutoscalingPolicy", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.CreateAutoscalingPolicy", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - 
{ - "name": "policy", - "type": ".google.cloud.dataproc.v1.AutoscalingPolicy" - } - ], - "resultType": ".google.cloud.dataproc.v1.AutoscalingPolicy", - "client": { - "shortName": "AutoscalingPolicyServiceClient", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyServiceClient" - }, - "method": { - "shortName": "CreateAutoscalingPolicy", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.CreateAutoscalingPolicy", - "service": { - "shortName": "AutoscalingPolicyService", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_AutoscalingPolicyService_UpdateAutoscalingPolicy_async", - "title": "dataproc updateAutoscalingPolicy Sample", - "origin": "API_DEFINITION", - "description": " Updates (replaces) autoscaling policy. Disabled check for update_mask, because all updates will be full replacements.", - "canonical": true, - "file": "autoscaling_policy_service.update_autoscaling_policy.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 53, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "UpdateAutoscalingPolicy", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.UpdateAutoscalingPolicy", - "async": true, - "parameters": [ - { - "name": "policy", - "type": ".google.cloud.dataproc.v1.AutoscalingPolicy" - } - ], - "resultType": ".google.cloud.dataproc.v1.AutoscalingPolicy", - "client": { - "shortName": "AutoscalingPolicyServiceClient", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyServiceClient" - }, - "method": { - "shortName": "UpdateAutoscalingPolicy", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.UpdateAutoscalingPolicy", - "service": { - "shortName": "AutoscalingPolicyService", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_AutoscalingPolicyService_GetAutoscalingPolicy_async", - "title": "dataproc getAutoscalingPolicy 
Sample", - "origin": "API_DEFINITION", - "description": " Retrieves autoscaling policy.", - "canonical": true, - "file": "autoscaling_policy_service.get_autoscaling_policy.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 60, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "GetAutoscalingPolicy", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.GetAutoscalingPolicy", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.AutoscalingPolicy", - "client": { - "shortName": "AutoscalingPolicyServiceClient", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyServiceClient" - }, - "method": { - "shortName": "GetAutoscalingPolicy", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.GetAutoscalingPolicy", - "service": { - "shortName": "AutoscalingPolicyService", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_AutoscalingPolicyService_ListAutoscalingPolicies_async", - "title": "dataproc listAutoscalingPolicies Sample", - "origin": "API_DEFINITION", - "description": " Lists autoscaling policies in the project.", - "canonical": true, - "file": "autoscaling_policy_service.list_autoscaling_policies.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 72, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "ListAutoscalingPolicies", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.ListAutoscalingPolicies", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "page_size", - "type": "TYPE_INT32" - }, - { - "name": "page_token", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.ListAutoscalingPoliciesResponse", - "client": { - "shortName": "AutoscalingPolicyServiceClient", - "fullName": 
"google.cloud.dataproc.v1.AutoscalingPolicyServiceClient" - }, - "method": { - "shortName": "ListAutoscalingPolicies", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.ListAutoscalingPolicies", - "service": { - "shortName": "AutoscalingPolicyService", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_AutoscalingPolicyService_DeleteAutoscalingPolicy_async", - "title": "dataproc deleteAutoscalingPolicy Sample", - "origin": "API_DEFINITION", - "description": " Deletes an autoscaling policy. It is an error to delete an autoscaling policy that is in use by one or more clusters.", - "canonical": true, - "file": "autoscaling_policy_service.delete_autoscaling_policy.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 60, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "DeleteAutoscalingPolicy", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.DeleteAutoscalingPolicy", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.protobuf.Empty", - "client": { - "shortName": "AutoscalingPolicyServiceClient", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyServiceClient" - }, - "method": { - "shortName": "DeleteAutoscalingPolicy", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService.DeleteAutoscalingPolicy", - "service": { - "shortName": "AutoscalingPolicyService", - "fullName": "google.cloud.dataproc.v1.AutoscalingPolicyService" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_BatchController_CreateBatch_async", - "title": "dataproc createBatch Sample", - "origin": "API_DEFINITION", - "description": " Creates a batch workload that executes asynchronously.", - "canonical": true, - "file": "batch_controller.create_batch.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 78, - "type": "FULL" - } - ], - "clientMethod": { - 
"shortName": "CreateBatch", - "fullName": "google.cloud.dataproc.v1.BatchController.CreateBatch", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "batch", - "type": ".google.cloud.dataproc.v1.Batch" - }, - { - "name": "batch_id", - "type": "TYPE_STRING" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "BatchControllerClient", - "fullName": "google.cloud.dataproc.v1.BatchControllerClient" - }, - "method": { - "shortName": "CreateBatch", - "fullName": "google.cloud.dataproc.v1.BatchController.CreateBatch", - "service": { - "shortName": "BatchController", - "fullName": "google.cloud.dataproc.v1.BatchController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_BatchController_GetBatch_async", - "title": "dataproc getBatch Sample", - "origin": "API_DEFINITION", - "description": " Gets the batch workload resource representation.", - "canonical": true, - "file": "batch_controller.get_batch.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 53, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "GetBatch", - "fullName": "google.cloud.dataproc.v1.BatchController.GetBatch", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.Batch", - "client": { - "shortName": "BatchControllerClient", - "fullName": "google.cloud.dataproc.v1.BatchControllerClient" - }, - "method": { - "shortName": "GetBatch", - "fullName": "google.cloud.dataproc.v1.BatchController.GetBatch", - "service": { - "shortName": "BatchController", - "fullName": "google.cloud.dataproc.v1.BatchController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_BatchController_ListBatches_async", - "title": "dataproc listBatches Sample", - "origin": "API_DEFINITION", - "description": " Lists batch workloads.", - "canonical": true, - "file": 
"batch_controller.list_batches.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 66, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "ListBatches", - "fullName": "google.cloud.dataproc.v1.BatchController.ListBatches", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "page_size", - "type": "TYPE_INT32" - }, - { - "name": "page_token", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.ListBatchesResponse", - "client": { - "shortName": "BatchControllerClient", - "fullName": "google.cloud.dataproc.v1.BatchControllerClient" - }, - "method": { - "shortName": "ListBatches", - "fullName": "google.cloud.dataproc.v1.BatchController.ListBatches", - "service": { - "shortName": "BatchController", - "fullName": "google.cloud.dataproc.v1.BatchController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_BatchController_DeleteBatch_async", - "title": "dataproc deleteBatch Sample", - "origin": "API_DEFINITION", - "description": " Deletes the batch workload resource. 
If the batch is not in terminal state, the delete fails and the response returns `FAILED_PRECONDITION`.", - "canonical": true, - "file": "batch_controller.delete_batch.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 53, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "DeleteBatch", - "fullName": "google.cloud.dataproc.v1.BatchController.DeleteBatch", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.protobuf.Empty", - "client": { - "shortName": "BatchControllerClient", - "fullName": "google.cloud.dataproc.v1.BatchControllerClient" - }, - "method": { - "shortName": "DeleteBatch", - "fullName": "google.cloud.dataproc.v1.BatchController.DeleteBatch", - "service": { - "shortName": "BatchController", - "fullName": "google.cloud.dataproc.v1.BatchController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_ClusterController_CreateCluster_async", - "title": "dataproc createCluster Sample", - "origin": "API_DEFINITION", - "description": " Creates a cluster in a project. 
The returned [Operation.metadata][google.longrunning.Operation.metadata] will be [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", - "canonical": true, - "file": "cluster_controller.create_cluster.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 82, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "CreateCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.CreateCluster", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "cluster", - "type": ".google.cloud.dataproc.v1.Cluster" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - }, - { - "name": "action_on_failed_primary_workers", - "type": ".google.cloud.dataproc.v1.FailureAction" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "ClusterControllerClient", - "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" - }, - "method": { - "shortName": "CreateCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.CreateCluster", - "service": { - "shortName": "ClusterController", - "fullName": "google.cloud.dataproc.v1.ClusterController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_ClusterController_UpdateCluster_async", - "title": "dataproc updateCluster Sample", - "origin": "API_DEFINITION", - "description": " Updates a cluster in a project. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). 
The cluster must be in a [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error is returned.", - "canonical": true, - "file": "cluster_controller.update_cluster.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 146, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "UpdateCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.UpdateCluster", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "cluster_name", - "type": "TYPE_STRING" - }, - { - "name": "cluster", - "type": ".google.cloud.dataproc.v1.Cluster" - }, - { - "name": "graceful_decommission_timeout", - "type": ".google.protobuf.Duration" - }, - { - "name": "update_mask", - "type": ".google.protobuf.FieldMask" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "ClusterControllerClient", - "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" - }, - "method": { - "shortName": "UpdateCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.UpdateCluster", - "service": { - "shortName": "ClusterController", - "fullName": "google.cloud.dataproc.v1.ClusterController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_ClusterController_StopCluster_async", - "title": "dataproc stopCluster Sample", - "origin": "API_DEFINITION", - "description": " Stops a cluster in a project.", - "canonical": true, - "file": "cluster_controller.stop_cluster.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 83, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "StopCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.StopCluster", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": 
"cluster_name", - "type": "TYPE_STRING" - }, - { - "name": "cluster_uuid", - "type": "TYPE_STRING" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "ClusterControllerClient", - "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" - }, - "method": { - "shortName": "StopCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.StopCluster", - "service": { - "shortName": "ClusterController", - "fullName": "google.cloud.dataproc.v1.ClusterController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_ClusterController_StartCluster_async", - "title": "dataproc startCluster Sample", - "origin": "API_DEFINITION", - "description": " Starts a cluster in a project.", - "canonical": true, - "file": "cluster_controller.start_cluster.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 83, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "StartCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.StartCluster", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "cluster_name", - "type": "TYPE_STRING" - }, - { - "name": "cluster_uuid", - "type": "TYPE_STRING" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "ClusterControllerClient", - "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" - }, - "method": { - "shortName": "StartCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.StartCluster", - "service": { - "shortName": "ClusterController", - "fullName": "google.cloud.dataproc.v1.ClusterController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_ClusterController_DeleteCluster_async", - "title": "dataproc deleteCluster Sample", - "origin": "API_DEFINITION", - 
"description": " Deletes a cluster in a project. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", - "canonical": true, - "file": "cluster_controller.delete_cluster.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 83, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "DeleteCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.DeleteCluster", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "cluster_name", - "type": "TYPE_STRING" - }, - { - "name": "cluster_uuid", - "type": "TYPE_STRING" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "ClusterControllerClient", - "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" - }, - "method": { - "shortName": "DeleteCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.DeleteCluster", - "service": { - "shortName": "ClusterController", - "fullName": "google.cloud.dataproc.v1.ClusterController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_ClusterController_GetCluster_async", - "title": "dataproc getCluster Sample", - "origin": "API_DEFINITION", - "description": " Gets the resource representation for a cluster in a project.", - "canonical": true, - "file": "cluster_controller.get_cluster.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 64, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "GetCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.GetCluster", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": 
"cluster_name", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.Cluster", - "client": { - "shortName": "ClusterControllerClient", - "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" - }, - "method": { - "shortName": "GetCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.GetCluster", - "service": { - "shortName": "ClusterController", - "fullName": "google.cloud.dataproc.v1.ClusterController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_ClusterController_ListClusters_async", - "title": "dataproc listClusters Sample", - "origin": "API_DEFINITION", - "description": " Lists all regions/{region}/clusters in a project alphabetically.", - "canonical": true, - "file": "cluster_controller.list_clusters.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 87, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "ListClusters", - "fullName": "google.cloud.dataproc.v1.ClusterController.ListClusters", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "filter", - "type": "TYPE_STRING" - }, - { - "name": "page_size", - "type": "TYPE_INT32" - }, - { - "name": "page_token", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.ListClustersResponse", - "client": { - "shortName": "ClusterControllerClient", - "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" - }, - "method": { - "shortName": "ListClusters", - "fullName": "google.cloud.dataproc.v1.ClusterController.ListClusters", - "service": { - "shortName": "ClusterController", - "fullName": "google.cloud.dataproc.v1.ClusterController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_ClusterController_DiagnoseCluster_async", - "title": "dataproc diagnoseCluster Sample", - "origin": "API_DEFINITION", - "description": " Gets cluster diagnostic information. 
The returned [Operation.metadata][google.longrunning.Operation.metadata] will be [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). After the operation completes, [Operation.response][google.longrunning.Operation.response] contains [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).", - "canonical": true, - "file": "cluster_controller.diagnose_cluster.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 65, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "DiagnoseCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.DiagnoseCluster", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "cluster_name", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "ClusterControllerClient", - "fullName": "google.cloud.dataproc.v1.ClusterControllerClient" - }, - "method": { - "shortName": "DiagnoseCluster", - "fullName": "google.cloud.dataproc.v1.ClusterController.DiagnoseCluster", - "service": { - "shortName": "ClusterController", - "fullName": "google.cloud.dataproc.v1.ClusterController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_JobController_SubmitJob_async", - "title": "dataproc submitJob Sample", - "origin": "API_DEFINITION", - "description": " Submits a job to a cluster.", - "canonical": true, - "file": "job_controller.submit_job.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 77, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "SubmitJob", - "fullName": "google.cloud.dataproc.v1.JobController.SubmitJob", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, 
- { - "name": "job", - "type": ".google.cloud.dataproc.v1.Job" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.Job", - "client": { - "shortName": "JobControllerClient", - "fullName": "google.cloud.dataproc.v1.JobControllerClient" - }, - "method": { - "shortName": "SubmitJob", - "fullName": "google.cloud.dataproc.v1.JobController.SubmitJob", - "service": { - "shortName": "JobController", - "fullName": "google.cloud.dataproc.v1.JobController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_JobController_SubmitJobAsOperation_async", - "title": "dataproc submitJobAsOperation Sample", - "origin": "API_DEFINITION", - "description": " Submits job to a cluster.", - "canonical": true, - "file": "job_controller.submit_job_as_operation.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 78, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "SubmitJobAsOperation", - "fullName": "google.cloud.dataproc.v1.JobController.SubmitJobAsOperation", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "job", - "type": ".google.cloud.dataproc.v1.Job" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "JobControllerClient", - "fullName": "google.cloud.dataproc.v1.JobControllerClient" - }, - "method": { - "shortName": "SubmitJobAsOperation", - "fullName": "google.cloud.dataproc.v1.JobController.SubmitJobAsOperation", - "service": { - "shortName": "JobController", - "fullName": "google.cloud.dataproc.v1.JobController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_JobController_GetJob_async", - "title": "dataproc getJob Sample", - "origin": "API_DEFINITION", - "description": " Gets the resource representation for a job in a project.", - "canonical": true, - "file": 
"job_controller.get_job.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 64, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "GetJob", - "fullName": "google.cloud.dataproc.v1.JobController.GetJob", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "job_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.Job", - "client": { - "shortName": "JobControllerClient", - "fullName": "google.cloud.dataproc.v1.JobControllerClient" - }, - "method": { - "shortName": "GetJob", - "fullName": "google.cloud.dataproc.v1.JobController.GetJob", - "service": { - "shortName": "JobController", - "fullName": "google.cloud.dataproc.v1.JobController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_JobController_ListJobs_async", - "title": "dataproc listJobs Sample", - "origin": "API_DEFINITION", - "description": " Lists regions/{region}/jobs in a project.", - "canonical": true, - "file": "job_controller.list_jobs.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 94, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "ListJobs", - "fullName": "google.cloud.dataproc.v1.JobController.ListJobs", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "page_size", - "type": "TYPE_INT32" - }, - { - "name": "page_token", - "type": "TYPE_STRING" - }, - { - "name": "cluster_name", - "type": "TYPE_STRING" - }, - { - "name": "job_state_matcher", - "type": ".google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher" - }, - { - "name": "filter", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.ListJobsResponse", - "client": { - "shortName": "JobControllerClient", - "fullName": "google.cloud.dataproc.v1.JobControllerClient" - }, - "method": { - 
"shortName": "ListJobs", - "fullName": "google.cloud.dataproc.v1.JobController.ListJobs", - "service": { - "shortName": "JobController", - "fullName": "google.cloud.dataproc.v1.JobController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_JobController_UpdateJob_async", - "title": "dataproc updateJob Sample", - "origin": "API_DEFINITION", - "description": " Updates a job in a project.", - "canonical": true, - "file": "job_controller.update_job.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 79, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "UpdateJob", - "fullName": "google.cloud.dataproc.v1.JobController.UpdateJob", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "job_id", - "type": "TYPE_STRING" - }, - { - "name": "job", - "type": ".google.cloud.dataproc.v1.Job" - }, - { - "name": "update_mask", - "type": ".google.protobuf.FieldMask" - } - ], - "resultType": ".google.cloud.dataproc.v1.Job", - "client": { - "shortName": "JobControllerClient", - "fullName": "google.cloud.dataproc.v1.JobControllerClient" - }, - "method": { - "shortName": "UpdateJob", - "fullName": "google.cloud.dataproc.v1.JobController.UpdateJob", - "service": { - "shortName": "JobController", - "fullName": "google.cloud.dataproc.v1.JobController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_JobController_CancelJob_async", - "title": "dataproc cancelJob Sample", - "origin": "API_DEFINITION", - "description": " Starts a job cancellation request. 
To access the job resource after cancellation, call [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).", - "canonical": true, - "file": "job_controller.cancel_job.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 64, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "CancelJob", - "fullName": "google.cloud.dataproc.v1.JobController.CancelJob", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "job_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.Job", - "client": { - "shortName": "JobControllerClient", - "fullName": "google.cloud.dataproc.v1.JobControllerClient" - }, - "method": { - "shortName": "CancelJob", - "fullName": "google.cloud.dataproc.v1.JobController.CancelJob", - "service": { - "shortName": "JobController", - "fullName": "google.cloud.dataproc.v1.JobController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_JobController_DeleteJob_async", - "title": "dataproc deleteJob Sample", - "origin": "API_DEFINITION", - "description": " Deletes the job from the project. 
If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.", - "canonical": true, - "file": "job_controller.delete_job.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 64, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "DeleteJob", - "fullName": "google.cloud.dataproc.v1.JobController.DeleteJob", - "async": true, - "parameters": [ - { - "name": "project_id", - "type": "TYPE_STRING" - }, - { - "name": "region", - "type": "TYPE_STRING" - }, - { - "name": "job_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.protobuf.Empty", - "client": { - "shortName": "JobControllerClient", - "fullName": "google.cloud.dataproc.v1.JobControllerClient" - }, - "method": { - "shortName": "DeleteJob", - "fullName": "google.cloud.dataproc.v1.JobController.DeleteJob", - "service": { - "shortName": "JobController", - "fullName": "google.cloud.dataproc.v1.JobController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async", - "title": "dataproc createNodeGroup Sample", - "origin": "API_DEFINITION", - "description": " Creates a node group in a cluster. 
The returned [Operation.metadata][google.longrunning.Operation.metadata] is [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", - "canonical": true, - "file": "node_group_controller.create_node_group.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 80, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "CreateNodeGroup", - "fullName": "google.cloud.dataproc.v1.NodeGroupController.CreateNodeGroup", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "node_group", - "type": ".google.cloud.dataproc.v1.NodeGroup" - }, - { - "name": "node_group_id", - "type": "TYPE_STRING" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "NodeGroupControllerClient", - "fullName": "google.cloud.dataproc.v1.NodeGroupControllerClient" - }, - "method": { - "shortName": "CreateNodeGroup", - "fullName": "google.cloud.dataproc.v1.NodeGroupController.CreateNodeGroup", - "service": { - "shortName": "NodeGroupController", - "fullName": "google.cloud.dataproc.v1.NodeGroupController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async", - "title": "dataproc resizeNodeGroup Sample", - "origin": "API_DEFINITION", - "description": " Resizes a node group in a cluster. 
The returned [Operation.metadata][google.longrunning.Operation.metadata] is [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", - "canonical": true, - "file": "node_group_controller.resize_node_group.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 90, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "ResizeNodeGroup", - "fullName": "google.cloud.dataproc.v1.NodeGroupController.ResizeNodeGroup", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - }, - { - "name": "size", - "type": "TYPE_INT32" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - }, - { - "name": "graceful_decommission_timeout", - "type": ".google.protobuf.Duration" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "NodeGroupControllerClient", - "fullName": "google.cloud.dataproc.v1.NodeGroupControllerClient" - }, - "method": { - "shortName": "ResizeNodeGroup", - "fullName": "google.cloud.dataproc.v1.NodeGroupController.ResizeNodeGroup", - "service": { - "shortName": "NodeGroupController", - "fullName": "google.cloud.dataproc.v1.NodeGroupController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_NodeGroupController_GetNodeGroup_async", - "title": "dataproc getNodeGroup Sample", - "origin": "API_DEFINITION", - "description": " Gets the resource representation for a node group in a cluster.", - "canonical": true, - "file": "node_group_controller.get_node_group.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 55, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "GetNodeGroup", - "fullName": "google.cloud.dataproc.v1.NodeGroupController.GetNodeGroup", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.NodeGroup", - "client": { - "shortName": "NodeGroupControllerClient", - 
"fullName": "google.cloud.dataproc.v1.NodeGroupControllerClient" - }, - "method": { - "shortName": "GetNodeGroup", - "fullName": "google.cloud.dataproc.v1.NodeGroupController.GetNodeGroup", - "service": { - "shortName": "NodeGroupController", - "fullName": "google.cloud.dataproc.v1.NodeGroupController" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_WorkflowTemplateService_CreateWorkflowTemplate_async", - "title": "dataproc createWorkflowTemplate Sample", - "origin": "API_DEFINITION", - "description": " Creates new workflow template.", - "canonical": true, - "file": "workflow_template_service.create_workflow_template.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 65, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "CreateWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "template", - "type": ".google.cloud.dataproc.v1.WorkflowTemplate" - } - ], - "resultType": ".google.cloud.dataproc.v1.WorkflowTemplate", - "client": { - "shortName": "WorkflowTemplateServiceClient", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" - }, - "method": { - "shortName": "CreateWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate", - "service": { - "shortName": "WorkflowTemplateService", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_WorkflowTemplateService_GetWorkflowTemplate_async", - "title": "dataproc getWorkflowTemplate Sample", - "origin": "API_DEFINITION", - "description": " Retrieves the latest workflow template. 
Can retrieve previously instantiated template by specifying optional version parameter.", - "canonical": true, - "file": "workflow_template_service.get_workflow_template.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 66, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "GetWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.GetWorkflowTemplate", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - }, - { - "name": "version", - "type": "TYPE_INT32" - } - ], - "resultType": ".google.cloud.dataproc.v1.WorkflowTemplate", - "client": { - "shortName": "WorkflowTemplateServiceClient", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" - }, - "method": { - "shortName": "GetWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.GetWorkflowTemplate", - "service": { - "shortName": "WorkflowTemplateService", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_WorkflowTemplateService_InstantiateWorkflowTemplate_async", - "title": "dataproc instantiateWorkflowTemplate Sample", - "origin": "API_DEFINITION", - "description": " Instantiates a template and begins execution. The returned Operation can be used to track execution of workflow by polling [operations.get][google.longrunning.Operations.GetOperation]. The Operation will complete when entire workflow is finished. The running workflow can be aborted via [operations.cancel][google.longrunning.Operations.CancelOperation]. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted. The [Operation.metadata][google.longrunning.Operation.metadata] will be [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). 
Also see [Using WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). On successful completion, [Operation.response][google.longrunning.Operation.response] will be [Empty][google.protobuf.Empty].", - "canonical": true, - "file": "workflow_template_service.instantiate_workflow_template.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 84, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "InstantiateWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - }, - { - "name": "version", - "type": "TYPE_INT32" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - }, - { - "name": "parameters", - "type": "TYPE_MESSAGE[]" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "WorkflowTemplateServiceClient", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" - }, - "method": { - "shortName": "InstantiateWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate", - "service": { - "shortName": "WorkflowTemplateService", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_WorkflowTemplateService_InstantiateInlineWorkflowTemplate_async", - "title": "dataproc instantiateInlineWorkflowTemplate Sample", - "origin": "API_DEFINITION", - "description": " Instantiates a template and begins execution. This method is equivalent to executing the sequence [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. 
The returned Operation can be used to track execution of workflow by polling [operations.get][google.longrunning.Operations.GetOperation]. The Operation will complete when entire workflow is finished. The running workflow can be aborted via [operations.cancel][google.longrunning.Operations.CancelOperation]. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted. The [Operation.metadata][google.longrunning.Operation.metadata] will be [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see [Using WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). On successful completion, [Operation.response][google.longrunning.Operation.response] will be [Empty][google.protobuf.Empty].", - "canonical": true, - "file": "workflow_template_service.instantiate_inline_workflow_template.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 76, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "InstantiateInlineWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateInlineWorkflowTemplate", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "template", - "type": ".google.cloud.dataproc.v1.WorkflowTemplate" - }, - { - "name": "request_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "WorkflowTemplateServiceClient", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" - }, - "method": { - "shortName": "InstantiateInlineWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateInlineWorkflowTemplate", - "service": { - "shortName": "WorkflowTemplateService", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" - } - } - } - }, - { - "regionTag": 
"dataproc_v1_generated_WorkflowTemplateService_UpdateWorkflowTemplate_async", - "title": "dataproc updateWorkflowTemplate Sample", - "origin": "API_DEFINITION", - "description": " Updates (replaces) workflow template. The updated template must contain version that matches the current server version.", - "canonical": true, - "file": "workflow_template_service.update_workflow_template.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 54, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "UpdateWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.UpdateWorkflowTemplate", - "async": true, - "parameters": [ - { - "name": "template", - "type": ".google.cloud.dataproc.v1.WorkflowTemplate" - } - ], - "resultType": ".google.cloud.dataproc.v1.WorkflowTemplate", - "client": { - "shortName": "WorkflowTemplateServiceClient", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" - }, - "method": { - "shortName": "UpdateWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.UpdateWorkflowTemplate", - "service": { - "shortName": "WorkflowTemplateService", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_WorkflowTemplateService_ListWorkflowTemplates_async", - "title": "dataproc listWorkflowTemplates Sample", - "origin": "API_DEFINITION", - "description": " Lists workflows that match the specified filter in the request.", - "canonical": true, - "file": "workflow_template_service.list_workflow_templates.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 71, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "ListWorkflowTemplates", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.ListWorkflowTemplates", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "page_size", - "type": "TYPE_INT32" - }, - { - 
"name": "page_token", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.dataproc.v1.ListWorkflowTemplatesResponse", - "client": { - "shortName": "WorkflowTemplateServiceClient", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" - }, - "method": { - "shortName": "ListWorkflowTemplates", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.ListWorkflowTemplates", - "service": { - "shortName": "WorkflowTemplateService", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" - } - } - } - }, - { - "regionTag": "dataproc_v1_generated_WorkflowTemplateService_DeleteWorkflowTemplate_async", - "title": "dataproc deleteWorkflowTemplate Sample", - "origin": "API_DEFINITION", - "description": " Deletes a workflow template. It does not cancel in-progress workflows.", - "canonical": true, - "file": "workflow_template_service.delete_workflow_template.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 66, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "DeleteWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - }, - { - "name": "version", - "type": "TYPE_INT32" - } - ], - "resultType": ".google.protobuf.Empty", - "client": { - "shortName": "WorkflowTemplateServiceClient", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateServiceClient" - }, - "method": { - "shortName": "DeleteWorkflowTemplate", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate", - "service": { - "shortName": "WorkflowTemplateService", - "fullName": "google.cloud.dataproc.v1.WorkflowTemplateService" - } - } - } - } - ] -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.create_workflow_template.js 
b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.create_workflow_template.js deleted file mode 100644 index 7f929bc072e..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.create_workflow_template.js +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(parent, template) { - // [START dataproc_v1_generated_WorkflowTemplateService_CreateWorkflowTemplate_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The resource name of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. 
- * * For `projects.regions.workflowTemplates.create`, the resource name of the - * region has the following format: - * `projects/{project_id}/regions/{region}` - * * For `projects.locations.workflowTemplates.create`, the resource name of - * the location has the following format: - * `projects/{project_id}/locations/{location}` - */ - // const parent = 'abc123' - /** - * Required. The Dataproc workflow template to create. - */ - // const template = {} - - // Imports the Dataproc library - const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new WorkflowTemplateServiceClient(); - - async function callCreateWorkflowTemplate() { - // Construct request - const request = { - parent, - template, - }; - - // Run request - const response = await dataprocClient.createWorkflowTemplate(request); - console.log(response); - } - - callCreateWorkflowTemplate(); - // [END dataproc_v1_generated_WorkflowTemplateService_CreateWorkflowTemplate_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.delete_workflow_template.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.delete_workflow_template.js deleted file mode 100644 index 5382da622d3..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.delete_workflow_template.js +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(name) { - // [START dataproc_v1_generated_WorkflowTemplateService_DeleteWorkflowTemplate_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The resource name of the workflow template, as described - * in https://cloud.google.com/apis/design/resource_names. - * * For `projects.regions.workflowTemplates.delete`, the resource name - * of the template has the following format: - * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - * * For `projects.locations.workflowTemplates.instantiate`, the resource name - * of the template has the following format: - * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - */ - // const name = 'abc123' - /** - * Optional. The version of workflow template to delete. If specified, - * will only delete the template if the current server version matches - * specified version. 
- */ - // const version = 1234 - - // Imports the Dataproc library - const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new WorkflowTemplateServiceClient(); - - async function callDeleteWorkflowTemplate() { - // Construct request - const request = { - name, - }; - - // Run request - const response = await dataprocClient.deleteWorkflowTemplate(request); - console.log(response); - } - - callDeleteWorkflowTemplate(); - // [END dataproc_v1_generated_WorkflowTemplateService_DeleteWorkflowTemplate_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.get_workflow_template.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.get_workflow_template.js deleted file mode 100644 index e1aab5cc0fc..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.get_workflow_template.js +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - - -'use strict'; - -function main(name) { - // [START dataproc_v1_generated_WorkflowTemplateService_GetWorkflowTemplate_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The resource name of the workflow template, as described - * in https://cloud.google.com/apis/design/resource_names. - * * For `projects.regions.workflowTemplates.get`, the resource name of the - * template has the following format: - * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - * * For `projects.locations.workflowTemplates.get`, the resource name of the - * template has the following format: - * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - */ - // const name = 'abc123' - /** - * Optional. The version of workflow template to retrieve. Only previously - * instantiated versions can be retrieved. - * If unspecified, retrieves the current version. 
- */ - // const version = 1234 - - // Imports the Dataproc library - const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new WorkflowTemplateServiceClient(); - - async function callGetWorkflowTemplate() { - // Construct request - const request = { - name, - }; - - // Run request - const response = await dataprocClient.getWorkflowTemplate(request); - console.log(response); - } - - callGetWorkflowTemplate(); - // [END dataproc_v1_generated_WorkflowTemplateService_GetWorkflowTemplate_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js deleted file mode 100644 index e7d46fa688f..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(parent, template) { - // [START dataproc_v1_generated_WorkflowTemplateService_InstantiateInlineWorkflowTemplate_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The resource name of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * * For `projects.regions.workflowTemplates,instantiateinline`, the resource - * name of the region has the following format: - * `projects/{project_id}/regions/{region}` - * * For `projects.locations.workflowTemplates.instantiateinline`, the - * resource name of the location has the following format: - * `projects/{project_id}/locations/{location}` - */ - // const parent = 'abc123' - /** - * Required. The workflow template to instantiate. - */ - // const template = {} - /** - * Optional. A tag that prevents multiple concurrent workflow - * instances with the same tag from running. This mitigates risk of - * concurrent instances started due to retries. - * It is recommended to always set this value to a - * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). - * The tag must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. 
- */ - // const requestId = 'abc123' - - // Imports the Dataproc library - const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new WorkflowTemplateServiceClient(); - - async function callInstantiateInlineWorkflowTemplate() { - // Construct request - const request = { - parent, - template, - }; - - // Run request - const [operation] = await dataprocClient.instantiateInlineWorkflowTemplate(request); - const [response] = await operation.promise(); - console.log(response); - } - - callInstantiateInlineWorkflowTemplate(); - // [END dataproc_v1_generated_WorkflowTemplateService_InstantiateInlineWorkflowTemplate_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_workflow_template.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_workflow_template.js deleted file mode 100644 index cd4efec87e6..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.instantiate_workflow_template.js +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(name) { - // [START dataproc_v1_generated_WorkflowTemplateService_InstantiateWorkflowTemplate_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The resource name of the workflow template, as described - * in https://cloud.google.com/apis/design/resource_names. - * * For `projects.regions.workflowTemplates.instantiate`, the resource name - * of the template has the following format: - * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - * * For `projects.locations.workflowTemplates.instantiate`, the resource name - * of the template has the following format: - * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - */ - // const name = 'abc123' - /** - * Optional. The version of workflow template to instantiate. If specified, - * the workflow will be instantiated only if the current version of - * the workflow template has the supplied version. - * This option cannot be used to instantiate a previous version of - * workflow template. - */ - // const version = 1234 - /** - * Optional. A tag that prevents multiple concurrent workflow - * instances with the same tag from running. This mitigates risk of - * concurrent instances started due to retries. - * It is recommended to always set this value to a - * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). - * The tag must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - */ - // const requestId = 'abc123' - /** - * Optional. 
Map from parameter names to values that should be used for those - * parameters. Values may not exceed 1000 characters. - */ - // const parameters = 1234 - - // Imports the Dataproc library - const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new WorkflowTemplateServiceClient(); - - async function callInstantiateWorkflowTemplate() { - // Construct request - const request = { - name, - }; - - // Run request - const [operation] = await dataprocClient.instantiateWorkflowTemplate(request); - const [response] = await operation.promise(); - console.log(response); - } - - callInstantiateWorkflowTemplate(); - // [END dataproc_v1_generated_WorkflowTemplateService_InstantiateWorkflowTemplate_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.list_workflow_templates.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.list_workflow_templates.js deleted file mode 100644 index 31c6c24a4e6..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.list_workflow_templates.js +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(parent) { - // [START dataproc_v1_generated_WorkflowTemplateService_ListWorkflowTemplates_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The resource name of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * * For `projects.regions.workflowTemplates,list`, the resource - * name of the region has the following format: - * `projects/{project_id}/regions/{region}` - * * For `projects.locations.workflowTemplates.list`, the - * resource name of the location has the following format: - * `projects/{project_id}/locations/{location}` - */ - // const parent = 'abc123' - /** - * Optional. The maximum number of results to return in each response. - */ - // const pageSize = 1234 - /** - * Optional. The page token, returned by a previous call, to request the - * next page of results. 
- */ - // const pageToken = 'abc123' - - // Imports the Dataproc library - const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new WorkflowTemplateServiceClient(); - - async function callListWorkflowTemplates() { - // Construct request - const request = { - parent, - }; - - // Run request - const iterable = await dataprocClient.listWorkflowTemplatesAsync(request); - for await (const response of iterable) { - console.log(response); - } - } - - callListWorkflowTemplates(); - // [END dataproc_v1_generated_WorkflowTemplateService_ListWorkflowTemplates_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.update_workflow_template.js b/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.update_workflow_template.js deleted file mode 100644 index df1ef7ef62a..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/workflow_template_service.update_workflow_template.js +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(template) { - // [START dataproc_v1_generated_WorkflowTemplateService_UpdateWorkflowTemplate_async] - /** - * This snippet has been automatically generated and should be regarded as a code template only. - * It will require modifications to work. - * It may require correct/in-range values for request initialization. - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The updated workflow template. - * The `template.version` field must match the current version. - */ - // const template = {} - - // Imports the Dataproc library - const {WorkflowTemplateServiceClient} = require('@google-cloud/dataproc').v1; - - // Instantiates a client - const dataprocClient = new WorkflowTemplateServiceClient(); - - async function callUpdateWorkflowTemplate() { - // Construct request - const request = { - template, - }; - - // Run request - const response = await dataprocClient.updateWorkflowTemplate(request); - console.log(response); - } - - callUpdateWorkflowTemplate(); - // [END dataproc_v1_generated_WorkflowTemplateService_UpdateWorkflowTemplate_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/index.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/index.ts deleted file mode 100644 index 3ef9be2ad8b..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/index.ts +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as v1 from './v1'; -const AutoscalingPolicyServiceClient = v1.AutoscalingPolicyServiceClient; -type AutoscalingPolicyServiceClient = v1.AutoscalingPolicyServiceClient; -const BatchControllerClient = v1.BatchControllerClient; -type BatchControllerClient = v1.BatchControllerClient; -const ClusterControllerClient = v1.ClusterControllerClient; -type ClusterControllerClient = v1.ClusterControllerClient; -const JobControllerClient = v1.JobControllerClient; -type JobControllerClient = v1.JobControllerClient; -const NodeGroupControllerClient = v1.NodeGroupControllerClient; -type NodeGroupControllerClient = v1.NodeGroupControllerClient; -const WorkflowTemplateServiceClient = v1.WorkflowTemplateServiceClient; -type WorkflowTemplateServiceClient = v1.WorkflowTemplateServiceClient; -export {v1, AutoscalingPolicyServiceClient, BatchControllerClient, ClusterControllerClient, JobControllerClient, NodeGroupControllerClient, WorkflowTemplateServiceClient}; -export default {v1, AutoscalingPolicyServiceClient, BatchControllerClient, ClusterControllerClient, JobControllerClient, NodeGroupControllerClient, WorkflowTemplateServiceClient}; -import * as protos from '../protos/protos'; -export {protos} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client.ts 
b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client.ts deleted file mode 100644 index 54b02c1e947..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client.ts +++ /dev/null @@ -1,1240 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import type * as gax from 'google-gax'; -import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; -import {Transform} from 'stream'; -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1/autoscaling_policy_service_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './autoscaling_policy_service_client_config.json'; -const version = require('../../../package.json').version; - -/** - * The API interface for managing autoscaling policies in the - * Dataproc API. 
- * @class - * @memberof v1 - */ -export class AutoscalingPolicyServiceClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - pathTemplates: {[name: string]: gax.PathTemplate}; - autoscalingPolicyServiceStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of AutoscalingPolicyServiceClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. 
If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you - * need to avoid loading the default gRPC version and want to use the fallback - * HTTP implementation. Load only fallback version and pass it to the constructor: - * ``` - * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC - * const client = new AutoscalingPolicyServiceClient({fallback: 'rest'}, gax); - * ``` - */ - constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof AutoscalingPolicyServiceClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // Request numeric enum values if REST transport is used. 
- opts.numericEnums = true; - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Load google-gax module synchronously if needed - if (!gaxInstance) { - gaxInstance = require('google-gax') as typeof gax; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. 
- this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // This API contains "path templates"; forward-slash-separated - // identifiers to uniquely identify resources within the API. - // Create useful helper objects for these. - this.pathTemplates = { - batchPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/batches/{batch}' - ), - locationPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}' - ), - nodeGroupPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' - ), - projectPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}' - ), - projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' - ), - projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' - ), - projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' - ), - projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' - ), - }; - - // Some of the methods on this service return "paged" results, - // (e.g. 50 results at a time, with tokens to get subsequent - // pages). Denote the keys used for pagination and results. - this.descriptors.page = { - listAutoscalingPolicies: - new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'policies') - }; - - // Put together the default options sent with requests. 
- this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.dataproc.v1.AutoscalingPolicyService', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = this._gaxModule.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.autoscalingPolicyServiceStub) { - return this.autoscalingPolicyServiceStub; - } - - // Put together the "service stub" for - // google.cloud.dataproc.v1.AutoscalingPolicyService. - this.autoscalingPolicyServiceStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.AutoscalingPolicyService') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.dataproc.v1.AutoscalingPolicyService, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
- const autoscalingPolicyServiceStubMethods = - ['createAutoscalingPolicy', 'updateAutoscalingPolicy', 'getAutoscalingPolicy', 'listAutoscalingPolicies', 'deleteAutoscalingPolicy']; - for (const methodName of autoscalingPolicyServiceStubMethods) { - const callPromise = this.autoscalingPolicyServiceStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.page[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor, - this._opts.fallback - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.autoscalingPolicyServiceStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'dataproc.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'dataproc.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. 
- */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- -/** - * Creates new autoscaling policy. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The "resource name" of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.autoscalingPolicies.create`, the resource name - * of the region has the following format: - * `projects/{project_id}/regions/{region}` - * - * * For `projects.locations.autoscalingPolicies.create`, the resource name - * of the location has the following format: - * `projects/{project_id}/locations/{location}` - * @param {google.cloud.dataproc.v1.AutoscalingPolicy} request.policy - * Required. The autoscaling policy to create. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/autoscaling_policy_service.create_autoscaling_policy.js - * region_tag:dataproc_v1_generated_AutoscalingPolicyService_CreateAutoscalingPolicy_async - */ - createAutoscalingPolicy( - request?: protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|undefined, {}|undefined - ]>; - createAutoscalingPolicy( - request: protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): void; - createAutoscalingPolicy( - request: protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): void; - createAutoscalingPolicy( - request?: protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.ICreateAutoscalingPolicyRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = 
options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - this.initialize(); - return this.innerApiCalls.createAutoscalingPolicy(request, options, callback); - } -/** - * Updates (replaces) autoscaling policy. - * - * Disabled check for update_mask, because all updates will be full - * replacements. - * - * @param {Object} request - * The request object that will be sent. - * @param {google.cloud.dataproc.v1.AutoscalingPolicy} request.policy - * Required. The updated autoscaling policy. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/autoscaling_policy_service.update_autoscaling_policy.js - * region_tag:dataproc_v1_generated_AutoscalingPolicyService_UpdateAutoscalingPolicy_async - */ - updateAutoscalingPolicy( - request?: protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|undefined, {}|undefined - ]>; - updateAutoscalingPolicy( - request: protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): void; - updateAutoscalingPolicy( - request: protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): void; - updateAutoscalingPolicy( - request?: protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IUpdateAutoscalingPolicyRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = 
options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'policy.name': request.policy!.name ?? '', - }); - this.initialize(); - return this.innerApiCalls.updateAutoscalingPolicy(request, options, callback); - } -/** - * Retrieves autoscaling policy. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. The "resource name" of the autoscaling policy, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.autoscalingPolicies.get`, the resource name - * of the policy has the following format: - * `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` - * - * * For `projects.locations.autoscalingPolicies.get`, the resource name - * of the policy has the following format: - * `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/autoscaling_policy_service.get_autoscaling_policy.js - * region_tag:dataproc_v1_generated_AutoscalingPolicyService_GetAutoscalingPolicy_async - */ - getAutoscalingPolicy( - request?: protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|undefined, {}|undefined - ]>; - getAutoscalingPolicy( - request: protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): void; - getAutoscalingPolicy( - request: protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): void; - getAutoscalingPolicy( - request?: protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IAutoscalingPolicy, - protos.google.cloud.dataproc.v1.IGetAutoscalingPolicyRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = 
options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'name': request.name ?? '', - }); - this.initialize(); - return this.innerApiCalls.getAutoscalingPolicy(request, options, callback); - } -/** - * Deletes an autoscaling policy. It is an error to delete an autoscaling - * policy that is in use by one or more clusters. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. The "resource name" of the autoscaling policy, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.autoscalingPolicies.delete`, the resource name - * of the policy has the following format: - * `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` - * - * * For `projects.locations.autoscalingPolicies.delete`, the resource name - * of the policy has the following format: - * `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}` - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/autoscaling_policy_service.delete_autoscaling_policy.js - * region_tag:dataproc_v1_generated_AutoscalingPolicyService_DeleteAutoscalingPolicy_async - */ - deleteAutoscalingPolicy( - request?: protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest, - options?: CallOptions): - Promise<[ - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|undefined, {}|undefined - ]>; - deleteAutoscalingPolicy( - request: protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest, - options: CallOptions, - callback: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): void; - deleteAutoscalingPolicy( - request: protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest, - callback: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): void; - deleteAutoscalingPolicy( - request?: protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteAutoscalingPolicyRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; 
- options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'name': request.name ?? '', - }); - this.initialize(); - return this.innerApiCalls.deleteAutoscalingPolicy(request, options, callback); - } - - /** - * Lists autoscaling policies in the project. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The "resource name" of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.autoscalingPolicies.list`, the resource name - * of the region has the following format: - * `projects/{project_id}/regions/{region}` - * - * * For `projects.locations.autoscalingPolicies.list`, the resource name - * of the location has the following format: - * `projects/{project_id}/locations/{location}` - * @param {number} [request.pageSize] - * Optional. The maximum number of results to return in each response. - * Must be less than or equal to 1000. Defaults to 100. - * @param {string} [request.pageToken] - * Optional. The page token, returned by a previous call, to request the - * next page of results. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is Array of [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy}. - * The client library will perform auto-pagination by default: it will call the API as many - * times as needed and will merge results from all the pages into this array. - * Note that it can affect your quota. - * We recommend using `listAutoscalingPoliciesAsync()` - * method described below for async iteration which you can stop as needed. 
- * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. - */ - listAutoscalingPolicies( - request?: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IAutoscalingPolicy[], - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest|null, - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse - ]>; - listAutoscalingPolicies( - request: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, - options: CallOptions, - callback: PaginationCallback< - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IAutoscalingPolicy>): void; - listAutoscalingPolicies( - request: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, - callback: PaginationCallback< - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IAutoscalingPolicy>): void; - listAutoscalingPolicies( - request?: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, - optionsOrCallback?: CallOptions|PaginationCallback< - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IAutoscalingPolicy>, - callback?: PaginationCallback< - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IAutoscalingPolicy>): - Promise<[ - protos.google.cloud.dataproc.v1.IAutoscalingPolicy[], - protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest|null, - 
protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesResponse - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - this.initialize(); - return this.innerApiCalls.listAutoscalingPolicies(request, options, callback); - } - -/** - * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The "resource name" of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.autoscalingPolicies.list`, the resource name - * of the region has the following format: - * `projects/{project_id}/regions/{region}` - * - * * For `projects.locations.autoscalingPolicies.list`, the resource name - * of the location has the following format: - * `projects/{project_id}/locations/{location}` - * @param {number} [request.pageSize] - * Optional. The maximum number of results to return in each response. - * Must be less than or equal to 1000. Defaults to 100. - * @param {string} [request.pageToken] - * Optional. The page token, returned by a previous call, to request the - * next page of results. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. 
- * @returns {Stream} - * An object stream which emits an object representing [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy} on 'data' event. - * The client library will perform auto-pagination by default: it will call the API as many - * times as needed. Note that it can affect your quota. - * We recommend using `listAutoscalingPoliciesAsync()` - * method described below for async iteration which you can stop as needed. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. - */ - listAutoscalingPoliciesStream( - request?: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, - options?: CallOptions): - Transform{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - const defaultCallSettings = this._defaults['listAutoscalingPolicies']; - const callSettings = defaultCallSettings.merge(options); - this.initialize(); - return this.descriptors.page.listAutoscalingPolicies.createStream( - this.innerApiCalls.listAutoscalingPolicies as GaxCall, - request, - callSettings - ); - } - -/** - * Equivalent to `listAutoscalingPolicies`, but returns an iterable object. - * - * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The "resource name" of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. 
- * - * * For `projects.regions.autoscalingPolicies.list`, the resource name - * of the region has the following format: - * `projects/{project_id}/regions/{region}` - * - * * For `projects.locations.autoscalingPolicies.list`, the resource name - * of the location has the following format: - * `projects/{project_id}/locations/{location}` - * @param {number} [request.pageSize] - * Optional. The maximum number of results to return in each response. - * Must be less than or equal to 1000. Defaults to 100. - * @param {string} [request.pageToken] - * Optional. The page token, returned by a previous call, to request the - * next page of results. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Object} - * An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols). - * When you iterate the returned iterable, each element will be an object representing - * [AutoscalingPolicy]{@link google.cloud.dataproc.v1.AutoscalingPolicy}. The API will be called under the hood as needed, once per the page, - * so you can stop the iteration when you don't need more results. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. 
- * @example include:samples/generated/v1/autoscaling_policy_service.list_autoscaling_policies.js - * region_tag:dataproc_v1_generated_AutoscalingPolicyService_ListAutoscalingPolicies_async - */ - listAutoscalingPoliciesAsync( - request?: protos.google.cloud.dataproc.v1.IListAutoscalingPoliciesRequest, - options?: CallOptions): - AsyncIterable{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - const defaultCallSettings = this._defaults['listAutoscalingPolicies']; - const callSettings = defaultCallSettings.merge(options); - this.initialize(); - return this.descriptors.page.listAutoscalingPolicies.asyncIterate( - this.innerApiCalls['listAutoscalingPolicies'] as GaxCall, - request as {}, - callSettings - ) as AsyncIterable; - } - // -------------------- - // -- Path templates -- - // -------------------- - - /** - * Return a fully-qualified batch resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} batch - * @returns {string} Resource name string. - */ - batchPath(project:string,location:string,batch:string) { - return this.pathTemplates.batchPathTemplate.render({ - project: project, - location: location, - batch: batch, - }); - } - - /** - * Parse the project from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the project. - */ - matchProjectFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).project; - } - - /** - * Parse the location from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the location. 
- */ - matchLocationFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).location; - } - - /** - * Parse the batch from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the batch. - */ - matchBatchFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).batch; - } - - /** - * Return a fully-qualified location resource name string. - * - * @param {string} project - * @param {string} location - * @returns {string} Resource name string. - */ - locationPath(project:string,location:string) { - return this.pathTemplates.locationPathTemplate.render({ - project: project, - location: location, - }); - } - - /** - * Parse the project from Location resource. - * - * @param {string} locationName - * A fully-qualified path representing Location resource. - * @returns {string} A string representing the project. - */ - matchProjectFromLocationName(locationName: string) { - return this.pathTemplates.locationPathTemplate.match(locationName).project; - } - - /** - * Parse the location from Location resource. - * - * @param {string} locationName - * A fully-qualified path representing Location resource. - * @returns {string} A string representing the location. - */ - matchLocationFromLocationName(locationName: string) { - return this.pathTemplates.locationPathTemplate.match(locationName).location; - } - - /** - * Return a fully-qualified nodeGroup resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} cluster - * @param {string} node_group - * @returns {string} Resource name string. 
- */ - nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { - return this.pathTemplates.nodeGroupPathTemplate.render({ - project: project, - region: region, - cluster: cluster, - node_group: nodeGroup, - }); - } - - /** - * Parse the project from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the project. - */ - matchProjectFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; - } - - /** - * Parse the region from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the region. - */ - matchRegionFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; - } - - /** - * Parse the cluster from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the cluster. - */ - matchClusterFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; - } - - /** - * Parse the node_group from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the node_group. - */ - matchNodeGroupFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; - } - - /** - * Return a fully-qualified project resource name string. - * - * @param {string} project - * @returns {string} Resource name string. - */ - projectPath(project:string) { - return this.pathTemplates.projectPathTemplate.render({ - project: project, - }); - } - - /** - * Parse the project from Project resource. 
- * - * @param {string} projectName - * A fully-qualified path representing Project resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectName(projectName: string) { - return this.pathTemplates.projectPathTemplate.match(projectName).project; - } - - /** - * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} autoscaling_policy - * @returns {string} Resource name string. - */ - projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ - project: project, - location: location, - autoscaling_policy: autoscalingPolicy, - }); - } - - /** - * Parse the project from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; - } - - /** - * Parse the location from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the location. - */ - matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; - } - - /** - * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. 
- * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the autoscaling_policy. - */ - matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; - } - - /** - * Return a fully-qualified projectLocationWorkflowTemplate resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} workflow_template - * @returns {string} Resource name string. - */ - projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ - project: project, - location: location, - workflow_template: workflowTemplate, - }); - } - - /** - * Parse the project from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; - } - - /** - * Parse the location from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the location. 
- */ - matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; - } - - /** - * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the workflow_template. - */ - matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; - } - - /** - * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} autoscaling_policy - * @returns {string} Resource name string. - */ - projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ - project: project, - region: region, - autoscaling_policy: autoscalingPolicy, - }); - } - - /** - * Parse the project from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; - } - - /** - * Parse the region from ProjectRegionAutoscalingPolicy resource. 
- * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the region. - */ - matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; - } - - /** - * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the autoscaling_policy. - */ - matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; - } - - /** - * Return a fully-qualified projectRegionWorkflowTemplate resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} workflow_template - * @returns {string} Resource name string. - */ - projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ - project: project, - region: region, - workflow_template: workflowTemplate, - }); - } - - /** - * Parse the project from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the project. 
- */ - matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; - } - - /** - * Parse the region from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the region. - */ - matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; - } - - /** - * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the workflow_template. - */ - matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.autoscalingPolicyServiceStub && !this._terminated) { - return this.autoscalingPolicyServiceStub.then(stub => { - this._terminated = true; - stub.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client_config.json deleted file mode 100644 index 09bd892268f..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_client_config.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "interfaces": { - "google.cloud.dataproc.v1.AutoscalingPolicyService": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "CreateAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - }, - "UpdateAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - }, - "GetAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - }, - "ListAutoscalingPolicies": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - }, - "DeleteAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_proto_list.json 
deleted file mode 100644 index 3bb7ccf055a..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/autoscaling_policy_service_proto_list.json +++ /dev/null @@ -1,10 +0,0 @@ -[ - "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", - "../../protos/google/cloud/dataproc/v1/batches.proto", - "../../protos/google/cloud/dataproc/v1/clusters.proto", - "../../protos/google/cloud/dataproc/v1/jobs.proto", - "../../protos/google/cloud/dataproc/v1/node_groups.proto", - "../../protos/google/cloud/dataproc/v1/operations.proto", - "../../protos/google/cloud/dataproc/v1/shared.proto", - "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" -] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client.ts deleted file mode 100644 index f172176f692..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client.ts +++ /dev/null @@ -1,1183 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -/* global window */ -import type * as gax from 'google-gax'; -import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation, PaginationCallback, GaxCall} from 'google-gax'; -import {Transform} from 'stream'; -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1/batch_controller_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './batch_controller_client_config.json'; -const version = require('../../../package.json').version; - -/** - * The BatchController provides methods to manage batch workloads. - * @class - * @memberof v1 - */ -export class BatchControllerClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - pathTemplates: {[name: string]: gax.PathTemplate}; - operationsClient: gax.OperationsClient; - batchControllerStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of BatchControllerClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. 
- * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you - * need to avoid loading the default gRPC version and want to use the fallback - * HTTP implementation. 
Load only fallback version and pass it to the constructor: - * ``` - * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC - * const client = new BatchControllerClient({fallback: 'rest'}, gax); - * ``` - */ - constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof BatchControllerClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // Request numeric enum values if REST transport is used. - opts.numericEnums = true; - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Load google-gax module synchronously if needed - if (!gaxInstance) { - gaxInstance = require('google-gax') as typeof gax; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. 
- this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // This API contains "path templates"; forward-slash-separated - // identifiers to uniquely identify resources within the API. - // Create useful helper objects for these. 
- this.pathTemplates = { - batchPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/batches/{batch}' - ), - locationPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}' - ), - nodeGroupPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' - ), - projectPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}' - ), - projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' - ), - projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' - ), - projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' - ), - projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' - ), - }; - - // Some of the methods on this service return "paged" results, - // (e.g. 50 results at a time, with tokens to get subsequent - // pages). Denote the keys used for pagination and results. - this.descriptors.page = { - listBatches: - new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'batches') - }; - - const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a - // an Operation object that allows for tracking of the operation, - // rather than holding a request open. - const lroOptions: GrpcClientOptions = { - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined - }; - if (opts.fallback === 'rest') { - lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = [{selector: 'google.iam.v1.IAMPolicy.GetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy',body: '*',}], - },{selector: 'google.iam.v1.IAMPolicy.SetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy',body: '*',}], - },{selector: 'google.iam.v1.IAMPolicy.TestIamPermissions',post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: 
'/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions',body: '*',}], - },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/regions/*/operations/*}:cancel',},{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/regions/*/operations}',}]; - } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); - const createBatchResponse = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.Batch') as gax.protobuf.Type; - const createBatchMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.BatchOperationMetadata') as gax.protobuf.Type; - - this.descriptors.longrunning = { - createBatch: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - createBatchResponse.decode.bind(createBatchResponse), - createBatchMetadata.decode.bind(createBatchMetadata)) - }; - - // Put together the default options sent with requests. - this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.dataproc.v1.BatchController', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = this._gaxModule.warn; - } - - /** - * Initialize the client. 
- * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.batchControllerStub) { - return this.batchControllerStub; - } - - // Put together the "service stub" for - // google.cloud.dataproc.v1.BatchController. - this.batchControllerStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.BatchController') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.dataproc.v1.BatchController, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
- const batchControllerStubMethods = - ['createBatch', 'getBatch', 'listBatches', 'deleteBatch']; - for (const methodName of batchControllerStubMethods) { - const callPromise = this.batchControllerStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.page[methodName] || - this.descriptors.longrunning[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor, - this._opts.fallback - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.batchControllerStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'dataproc.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'dataproc.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. 
- */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- -/** - * Gets the batch workload resource representation. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. The name of the batch to retrieve. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Batch]{@link google.cloud.dataproc.v1.Batch}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. - * @example include:samples/generated/v1/batch_controller.get_batch.js - * region_tag:dataproc_v1_generated_BatchController_GetBatch_async - */ - getBatch( - request?: protos.google.cloud.dataproc.v1.IGetBatchRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IBatch, - protos.google.cloud.dataproc.v1.IGetBatchRequest|undefined, {}|undefined - ]>; - getBatch( - request: protos.google.cloud.dataproc.v1.IGetBatchRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IBatch, - protos.google.cloud.dataproc.v1.IGetBatchRequest|null|undefined, - {}|null|undefined>): void; - getBatch( - request: protos.google.cloud.dataproc.v1.IGetBatchRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IBatch, - protos.google.cloud.dataproc.v1.IGetBatchRequest|null|undefined, - {}|null|undefined>): void; - getBatch( - request?: protos.google.cloud.dataproc.v1.IGetBatchRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IBatch, - 
protos.google.cloud.dataproc.v1.IGetBatchRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IBatch, - protos.google.cloud.dataproc.v1.IGetBatchRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IBatch, - protos.google.cloud.dataproc.v1.IGetBatchRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'name': request.name ?? '', - }); - this.initialize(); - return this.innerApiCalls.getBatch(request, options, callback); - } -/** - * Deletes the batch workload resource. If the batch is not in terminal state, - * the delete fails and the response returns `FAILED_PRECONDITION`. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. The name of the batch resource to delete. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/batch_controller.delete_batch.js - * region_tag:dataproc_v1_generated_BatchController_DeleteBatch_async - */ - deleteBatch( - request?: protos.google.cloud.dataproc.v1.IDeleteBatchRequest, - options?: CallOptions): - Promise<[ - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteBatchRequest|undefined, {}|undefined - ]>; - deleteBatch( - request: protos.google.cloud.dataproc.v1.IDeleteBatchRequest, - options: CallOptions, - callback: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteBatchRequest|null|undefined, - {}|null|undefined>): void; - deleteBatch( - request: protos.google.cloud.dataproc.v1.IDeleteBatchRequest, - callback: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteBatchRequest|null|undefined, - {}|null|undefined>): void; - deleteBatch( - request?: protos.google.cloud.dataproc.v1.IDeleteBatchRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteBatchRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteBatchRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteBatchRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'name': request.name ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.deleteBatch(request, options, callback); - } - -/** - * Creates a batch workload that executes asynchronously. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The parent resource where this batch will be created. - * @param {google.cloud.dataproc.v1.Batch} request.batch - * Required. The batch to create. - * @param {string} [request.batchId] - * Optional. The ID to use for the batch, which will become the final component of - * the batch's resource name. - * - * This value must be 4-63 characters. Valid characters are `/{@link 0-9|a-z}-/`. - * @param {string} [request.requestId] - * Optional. A unique ID used to identify the request. If the service - * receives two - * [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s - * with the same request_id, the second request is ignored and the - * Operation that corresponds to the first Batch created and stored - * in the backend is returned. - * - * Recommendation: Set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The value must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1/batch_controller.create_batch.js - * region_tag:dataproc_v1_generated_BatchController_CreateBatch_async - */ - createBatch( - request?: protos.google.cloud.dataproc.v1.ICreateBatchRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - createBatch( - request: protos.google.cloud.dataproc.v1.ICreateBatchRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - createBatch( - request: protos.google.cloud.dataproc.v1.ICreateBatchRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - createBatch( - request?: protos.google.cloud.dataproc.v1.ICreateBatchRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - this.initialize(); - return this.innerApiCalls.createBatch(request, options, callback); - } -/** - * Check the status of the long running operation returned by `createBatch()`. - * @param {String} name - * The operation name that will be passed. 
- * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/batch_controller.create_batch.js - * region_tag:dataproc_v1_generated_BatchController_CreateBatch_async - */ - async checkCreateBatchProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.createBatch, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } - /** - * Lists batch workloads. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The parent, which owns this collection of batches. - * @param {number} [request.pageSize] - * Optional. The maximum number of batches to return in each response. - * The service may return fewer than this value. - * The default page size is 20; the maximum page size is 1000. - * @param {string} [request.pageToken] - * Optional. A page token received from a previous `ListBatches` call. - * Provide this token to retrieve the subsequent page. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is Array of [Batch]{@link google.cloud.dataproc.v1.Batch}. 
- * The client library will perform auto-pagination by default: it will call the API as many - * times as needed and will merge results from all the pages into this array. - * Note that it can affect your quota. - * We recommend using `listBatchesAsync()` - * method described below for async iteration which you can stop as needed. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. - */ - listBatches( - request?: protos.google.cloud.dataproc.v1.IListBatchesRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IBatch[], - protos.google.cloud.dataproc.v1.IListBatchesRequest|null, - protos.google.cloud.dataproc.v1.IListBatchesResponse - ]>; - listBatches( - request: protos.google.cloud.dataproc.v1.IListBatchesRequest, - options: CallOptions, - callback: PaginationCallback< - protos.google.cloud.dataproc.v1.IListBatchesRequest, - protos.google.cloud.dataproc.v1.IListBatchesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IBatch>): void; - listBatches( - request: protos.google.cloud.dataproc.v1.IListBatchesRequest, - callback: PaginationCallback< - protos.google.cloud.dataproc.v1.IListBatchesRequest, - protos.google.cloud.dataproc.v1.IListBatchesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IBatch>): void; - listBatches( - request?: protos.google.cloud.dataproc.v1.IListBatchesRequest, - optionsOrCallback?: CallOptions|PaginationCallback< - protos.google.cloud.dataproc.v1.IListBatchesRequest, - protos.google.cloud.dataproc.v1.IListBatchesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IBatch>, - callback?: PaginationCallback< - protos.google.cloud.dataproc.v1.IListBatchesRequest, - protos.google.cloud.dataproc.v1.IListBatchesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IBatch>): - Promise<[ - protos.google.cloud.dataproc.v1.IBatch[], - 
protos.google.cloud.dataproc.v1.IListBatchesRequest|null, - protos.google.cloud.dataproc.v1.IListBatchesResponse - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - this.initialize(); - return this.innerApiCalls.listBatches(request, options, callback); - } - -/** - * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The parent, which owns this collection of batches. - * @param {number} [request.pageSize] - * Optional. The maximum number of batches to return in each response. - * The service may return fewer than this value. - * The default page size is 20; the maximum page size is 1000. - * @param {string} [request.pageToken] - * Optional. A page token received from a previous `ListBatches` call. - * Provide this token to retrieve the subsequent page. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Stream} - * An object stream which emits an object representing [Batch]{@link google.cloud.dataproc.v1.Batch} on 'data' event. - * The client library will perform auto-pagination by default: it will call the API as many - * times as needed. Note that it can affect your quota. - * We recommend using `listBatchesAsync()` - * method described below for async iteration which you can stop as needed. 
- * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. - */ - listBatchesStream( - request?: protos.google.cloud.dataproc.v1.IListBatchesRequest, - options?: CallOptions): - Transform{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - const defaultCallSettings = this._defaults['listBatches']; - const callSettings = defaultCallSettings.merge(options); - this.initialize(); - return this.descriptors.page.listBatches.createStream( - this.innerApiCalls.listBatches as GaxCall, - request, - callSettings - ); - } - -/** - * Equivalent to `listBatches`, but returns an iterable object. - * - * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The parent, which owns this collection of batches. - * @param {number} [request.pageSize] - * Optional. The maximum number of batches to return in each response. - * The service may return fewer than this value. - * The default page size is 20; the maximum page size is 1000. - * @param {string} [request.pageToken] - * Optional. A page token received from a previous `ListBatches` call. - * Provide this token to retrieve the subsequent page. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Object} - * An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols). 
- * When you iterate the returned iterable, each element will be an object representing - * [Batch]{@link google.cloud.dataproc.v1.Batch}. The API will be called under the hood as needed, once per the page, - * so you can stop the iteration when you don't need more results. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. - * @example include:samples/generated/v1/batch_controller.list_batches.js - * region_tag:dataproc_v1_generated_BatchController_ListBatches_async - */ - listBatchesAsync( - request?: protos.google.cloud.dataproc.v1.IListBatchesRequest, - options?: CallOptions): - AsyncIterable{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - const defaultCallSettings = this._defaults['listBatches']; - const callSettings = defaultCallSettings.merge(options); - this.initialize(); - return this.descriptors.page.listBatches.asyncIterate( - this.innerApiCalls['listBatches'] as GaxCall, - request as {}, - callSettings - ) as AsyncIterable; - } - // -------------------- - // -- Path templates -- - // -------------------- - - /** - * Return a fully-qualified batch resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} batch - * @returns {string} Resource name string. - */ - batchPath(project:string,location:string,batch:string) { - return this.pathTemplates.batchPathTemplate.render({ - project: project, - location: location, - batch: batch, - }); - } - - /** - * Parse the project from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the project. 
- */ - matchProjectFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).project; - } - - /** - * Parse the location from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the location. - */ - matchLocationFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).location; - } - - /** - * Parse the batch from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the batch. - */ - matchBatchFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).batch; - } - - /** - * Return a fully-qualified location resource name string. - * - * @param {string} project - * @param {string} location - * @returns {string} Resource name string. - */ - locationPath(project:string,location:string) { - return this.pathTemplates.locationPathTemplate.render({ - project: project, - location: location, - }); - } - - /** - * Parse the project from Location resource. - * - * @param {string} locationName - * A fully-qualified path representing Location resource. - * @returns {string} A string representing the project. - */ - matchProjectFromLocationName(locationName: string) { - return this.pathTemplates.locationPathTemplate.match(locationName).project; - } - - /** - * Parse the location from Location resource. - * - * @param {string} locationName - * A fully-qualified path representing Location resource. - * @returns {string} A string representing the location. - */ - matchLocationFromLocationName(locationName: string) { - return this.pathTemplates.locationPathTemplate.match(locationName).location; - } - - /** - * Return a fully-qualified nodeGroup resource name string. 
- * - * @param {string} project - * @param {string} region - * @param {string} cluster - * @param {string} node_group - * @returns {string} Resource name string. - */ - nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { - return this.pathTemplates.nodeGroupPathTemplate.render({ - project: project, - region: region, - cluster: cluster, - node_group: nodeGroup, - }); - } - - /** - * Parse the project from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the project. - */ - matchProjectFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; - } - - /** - * Parse the region from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the region. - */ - matchRegionFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; - } - - /** - * Parse the cluster from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the cluster. - */ - matchClusterFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; - } - - /** - * Parse the node_group from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the node_group. - */ - matchNodeGroupFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; - } - - /** - * Return a fully-qualified project resource name string. - * - * @param {string} project - * @returns {string} Resource name string. 
- */ - projectPath(project:string) { - return this.pathTemplates.projectPathTemplate.render({ - project: project, - }); - } - - /** - * Parse the project from Project resource. - * - * @param {string} projectName - * A fully-qualified path representing Project resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectName(projectName: string) { - return this.pathTemplates.projectPathTemplate.match(projectName).project; - } - - /** - * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} autoscaling_policy - * @returns {string} Resource name string. - */ - projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ - project: project, - location: location, - autoscaling_policy: autoscalingPolicy, - }); - } - - /** - * Parse the project from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; - } - - /** - * Parse the location from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the location. 
- */ - matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; - } - - /** - * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the autoscaling_policy. - */ - matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; - } - - /** - * Return a fully-qualified projectLocationWorkflowTemplate resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} workflow_template - * @returns {string} Resource name string. - */ - projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ - project: project, - location: location, - workflow_template: workflowTemplate, - }); - } - - /** - * Parse the project from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; - } - - /** - * Parse the location from ProjectLocationWorkflowTemplate resource. 
- * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the location. - */ - matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; - } - - /** - * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the workflow_template. - */ - matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; - } - - /** - * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} autoscaling_policy - * @returns {string} Resource name string. - */ - projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ - project: project, - region: region, - autoscaling_policy: autoscalingPolicy, - }); - } - - /** - * Parse the project from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the project. 
- */ - matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; - } - - /** - * Parse the region from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the region. - */ - matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; - } - - /** - * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the autoscaling_policy. - */ - matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; - } - - /** - * Return a fully-qualified projectRegionWorkflowTemplate resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} workflow_template - * @returns {string} Resource name string. - */ - projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ - project: project, - region: region, - workflow_template: workflowTemplate, - }); - } - - /** - * Parse the project from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. 
- * @returns {string} A string representing the project. - */ - matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; - } - - /** - * Parse the region from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the region. - */ - matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; - } - - /** - * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the workflow_template. - */ - matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.batchControllerStub && !this._terminated) { - return this.batchControllerStub.then(stub => { - this._terminated = true; - stub.close(); - this.operationsClient.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client_config.json deleted file mode 100644 index a451087cbb2..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_client_config.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "interfaces": { - "google.cloud.dataproc.v1.BatchController": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "CreateBatch": { - "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - }, - "GetBatch": { - "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - }, - "ListBatches": { - "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - }, - "DeleteBatch": { - "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_proto_list.json deleted file mode 100644 index 3bb7ccf055a..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/batch_controller_proto_list.json +++ /dev/null @@ -1,10 +0,0 @@ -[ - "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", - "../../protos/google/cloud/dataproc/v1/batches.proto", - 
"../../protos/google/cloud/dataproc/v1/clusters.proto", - "../../protos/google/cloud/dataproc/v1/jobs.proto", - "../../protos/google/cloud/dataproc/v1/node_groups.proto", - "../../protos/google/cloud/dataproc/v1/operations.proto", - "../../protos/google/cloud/dataproc/v1/shared.proto", - "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" -] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client.ts deleted file mode 100644 index 15d1aa57e3e..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client.ts +++ /dev/null @@ -1,1840 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import type * as gax from 'google-gax'; -import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation, PaginationCallback, GaxCall} from 'google-gax'; -import {Transform} from 'stream'; -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1/cluster_controller_client_config.json`. 
- * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './cluster_controller_client_config.json'; -const version = require('../../../package.json').version; - -/** - * The ClusterControllerService provides methods to manage clusters - * of Compute Engine instances. - * @class - * @memberof v1 - */ -export class ClusterControllerClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - pathTemplates: {[name: string]: gax.PathTemplate}; - operationsClient: gax.OperationsClient; - clusterControllerStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of ClusterControllerClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. 
- * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you - * need to avoid loading the default gRPC version and want to use the fallback - * HTTP implementation. Load only fallback version and pass it to the constructor: - * ``` - * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC - * const client = new ClusterControllerClient({fallback: 'rest'}, gax); - * ``` - */ - constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { - // Ensure that options include all the required fields. 
- const staticMembers = this.constructor as typeof ClusterControllerClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // Request numeric enum values if REST transport is used. - opts.numericEnums = true; - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Load google-gax module synchronously if needed - if (!gaxInstance) { - gaxInstance = require('google-gax') as typeof gax; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. 
- const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // This API contains "path templates"; forward-slash-separated - // identifiers to uniquely identify resources within the API. - // Create useful helper objects for these. - this.pathTemplates = { - batchPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/batches/{batch}' - ), - nodeGroupPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' - ), - projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' - ), - projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' - ), - projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' - ), - projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' - ), - servicePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/services/{service}' - ), - }; - - // Some of the methods on this service return "paged" results, - 
// (e.g. 50 results at a time, with tokens to get subsequent - // pages). Denote the keys used for pagination and results. - this.descriptors.page = { - listClusters: - new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'clusters') - }; - - const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a - // an Operation object that allows for tracking of the operation, - // rather than holding a request open. - const lroOptions: GrpcClientOptions = { - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined - }; - if (opts.fallback === 'rest') { - lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = [{selector: 'google.iam.v1.IAMPolicy.GetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy',body: '*',}], - },{selector: 'google.iam.v1.IAMPolicy.SetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy',body: 
'*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy',body: '*',}], - },{selector: 'google.iam.v1.IAMPolicy.TestIamPermissions',post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions',body: '*',}], - },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/regions/*/operations/*}:cancel',},{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/regions/*/operations}',}]; - } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); - const createClusterResponse = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.Cluster') as gax.protobuf.Type; - const createClusterMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; - const updateClusterResponse = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.Cluster') as gax.protobuf.Type; - const updateClusterMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; - const stopClusterResponse = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.Cluster') as gax.protobuf.Type; - 
const stopClusterMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; - const startClusterResponse = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.Cluster') as gax.protobuf.Type; - const startClusterMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; - const deleteClusterResponse = protoFilesRoot.lookup( - '.google.protobuf.Empty') as gax.protobuf.Type; - const deleteClusterMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; - const diagnoseClusterResponse = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.DiagnoseClusterResults') as gax.protobuf.Type; - const diagnoseClusterMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.ClusterOperationMetadata') as gax.protobuf.Type; - - this.descriptors.longrunning = { - createCluster: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - createClusterResponse.decode.bind(createClusterResponse), - createClusterMetadata.decode.bind(createClusterMetadata)), - updateCluster: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - updateClusterResponse.decode.bind(updateClusterResponse), - updateClusterMetadata.decode.bind(updateClusterMetadata)), - stopCluster: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - stopClusterResponse.decode.bind(stopClusterResponse), - stopClusterMetadata.decode.bind(stopClusterMetadata)), - startCluster: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - startClusterResponse.decode.bind(startClusterResponse), - startClusterMetadata.decode.bind(startClusterMetadata)), - deleteCluster: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - deleteClusterResponse.decode.bind(deleteClusterResponse), - deleteClusterMetadata.decode.bind(deleteClusterMetadata)), - diagnoseCluster: new this._gaxModule.LongrunningDescriptor( - 
this.operationsClient, - diagnoseClusterResponse.decode.bind(diagnoseClusterResponse), - diagnoseClusterMetadata.decode.bind(diagnoseClusterMetadata)) - }; - - // Put together the default options sent with requests. - this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.dataproc.v1.ClusterController', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = this._gaxModule.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.clusterControllerStub) { - return this.clusterControllerStub; - } - - // Put together the "service stub" for - // google.cloud.dataproc.v1.ClusterController. - this.clusterControllerStub = this._gaxGrpc.createStub( - this._opts.fallback ? 
- (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.ClusterController') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.dataproc.v1.ClusterController, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. - const clusterControllerStubMethods = - ['createCluster', 'updateCluster', 'stopCluster', 'startCluster', 'deleteCluster', 'getCluster', 'listClusters', 'diagnoseCluster']; - for (const methodName of clusterControllerStubMethods) { - const callPromise = this.clusterControllerStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.page[methodName] || - this.descriptors.longrunning[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor, - this._opts.fallback - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.clusterControllerStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'dataproc.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'dataproc.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. 
- * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. - */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- -/** - * Gets the resource representation for a cluster in a project. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} request.clusterName - * Required. The cluster name. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Cluster]{@link google.cloud.dataproc.v1.Cluster}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/cluster_controller.get_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_GetCluster_async - */ - getCluster( - request?: protos.google.cloud.dataproc.v1.IGetClusterRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.ICluster, - protos.google.cloud.dataproc.v1.IGetClusterRequest|undefined, {}|undefined - ]>; - getCluster( - request: protos.google.cloud.dataproc.v1.IGetClusterRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.ICluster, - protos.google.cloud.dataproc.v1.IGetClusterRequest|null|undefined, - {}|null|undefined>): void; - getCluster( - request: protos.google.cloud.dataproc.v1.IGetClusterRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.ICluster, - protos.google.cloud.dataproc.v1.IGetClusterRequest|null|undefined, - {}|null|undefined>): void; - getCluster( - request?: protos.google.cloud.dataproc.v1.IGetClusterRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.ICluster, - protos.google.cloud.dataproc.v1.IGetClusterRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.ICluster, - protos.google.cloud.dataproc.v1.IGetClusterRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.ICluster, - protos.google.cloud.dataproc.v1.IGetClusterRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? 
'', - 'region': request.region ?? '', - 'cluster_name': request.clusterName ?? '', - }); - this.initialize(); - return this.innerApiCalls.getCluster(request, options, callback); - } - -/** - * Creates a cluster in a project. The returned - * {@link google.longrunning.Operation.metadata|Operation.metadata} will be - * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {google.cloud.dataproc.v1.Cluster} request.cluster - * Required. The cluster to create. - * @param {string} [request.requestId] - * Optional. A unique ID used to identify the request. If the server receives - * two - * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s - * with the same id, then the second request will be ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created - * and stored in the backend is returned. - * - * It is recommended to always set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {google.cloud.dataproc.v1.FailureAction} [request.actionOnFailedPrimaryWorkers] - * Optional. Failure action when primary worker creation fails. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. 
- * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/cluster_controller.create_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_CreateCluster_async - */ - createCluster( - request?: protos.google.cloud.dataproc.v1.ICreateClusterRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - createCluster( - request: protos.google.cloud.dataproc.v1.ICreateClusterRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - createCluster( - request: protos.google.cloud.dataproc.v1.ICreateClusterRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - createCluster( - request?: protos.google.cloud.dataproc.v1.ICreateClusterRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - 
] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - }); - this.initialize(); - return this.innerApiCalls.createCluster(request, options, callback); - } -/** - * Check the status of the long running operation returned by `createCluster()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/cluster_controller.create_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_CreateCluster_async - */ - async checkCreateClusterProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.createCluster, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } -/** - * Updates a cluster in a project. The returned - * {@link google.longrunning.Operation.metadata|Operation.metadata} will be - * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - * The cluster must be in a - * {@link google.cloud.dataproc.v1.ClusterStatus.State|`RUNNING`} state or an error - * is returned. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project the - * cluster belongs to. - * @param {string} request.region - * Required. 
The Dataproc region in which to handle the request. - * @param {string} request.clusterName - * Required. The cluster name. - * @param {google.cloud.dataproc.v1.Cluster} request.cluster - * Required. The changes to the cluster. - * @param {google.protobuf.Duration} [request.gracefulDecommissionTimeout] - * Optional. Timeout for graceful YARN decomissioning. Graceful - * decommissioning allows removing nodes from the cluster without - * interrupting jobs in progress. Timeout specifies how long to wait for jobs - * in progress to finish before forcefully removing nodes (and potentially - * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. (see JSON representation of - * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). - * - * Only supported on Dataproc image versions 1.2 and higher. - * @param {google.protobuf.FieldMask} request.updateMask - * Required. Specifies the path, relative to `Cluster`, of - * the field to update. For example, to change the number of workers - * in a cluster to 5, the `update_mask` parameter would be - * specified as `config.worker_config.num_instances`, - * and the `PATCH` request body would specify the new value, as follows: - * - * { - * "config":{ - * "workerConfig":{ - * "numInstances":"5" - * } - * } - * } - * Similarly, to change the number of preemptible workers in a cluster to 5, - * the `update_mask` parameter would be - * `config.secondary_worker_config.num_instances`, and the `PATCH` request - * body would be set as follows: - * - * { - * "config":{ - * "secondaryWorkerConfig":{ - * "numInstances":"5" - * } - * } - * } - * Note: Currently, only the following fields can be updated: - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - *
MaskPurpose
labelsUpdate labels
config.worker_config.num_instancesResize primary worker group
config.secondary_worker_config.num_instancesResize secondary worker group
config.autoscaling_config.policy_uriUse, stop using, or - * change autoscaling policies
- * @param {string} [request.requestId] - * Optional. A unique ID used to identify the request. If the server - * receives two - * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s - * with the same id, then the second request will be ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created - * and stored in the backend is returned. - * - * It is recommended to always set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1/cluster_controller.update_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_UpdateCluster_async - */ - updateCluster( - request?: protos.google.cloud.dataproc.v1.IUpdateClusterRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - updateCluster( - request: protos.google.cloud.dataproc.v1.IUpdateClusterRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - updateCluster( - request: protos.google.cloud.dataproc.v1.IUpdateClusterRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - updateCluster( - request?: protos.google.cloud.dataproc.v1.IUpdateClusterRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - 'cluster_name': request.clusterName ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.updateCluster(request, options, callback); - } -/** - * Check the status of the long running operation returned by `updateCluster()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/cluster_controller.update_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_UpdateCluster_async - */ - async checkUpdateClusterProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.updateCluster, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } -/** - * Stops a cluster in a project. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project the - * cluster belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} request.clusterName - * Required. The cluster name. - * @param {string} [request.clusterUuid] - * Optional. Specifying the `cluster_uuid` means the RPC will fail - * (with error NOT_FOUND) if a cluster with the specified UUID does not exist. - * @param {string} [request.requestId] - * Optional. A unique ID used to identify the request. 
If the server - * receives two - * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s - * with the same id, then the second request will be ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created - * and stored in the backend is returned. - * - * Recommendation: Set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1/cluster_controller.stop_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_StopCluster_async - */ - stopCluster( - request?: protos.google.cloud.dataproc.v1.IStopClusterRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - stopCluster( - request: protos.google.cloud.dataproc.v1.IStopClusterRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - stopCluster( - request: protos.google.cloud.dataproc.v1.IStopClusterRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - stopCluster( - request?: protos.google.cloud.dataproc.v1.IStopClusterRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - 'cluster_name': request.clusterName ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.stopCluster(request, options, callback); - } -/** - * Check the status of the long running operation returned by `stopCluster()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/cluster_controller.stop_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_StopCluster_async - */ - async checkStopClusterProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.stopCluster, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } -/** - * Starts a cluster in a project. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project the - * cluster belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} request.clusterName - * Required. The cluster name. - * @param {string} [request.clusterUuid] - * Optional. Specifying the `cluster_uuid` means the RPC will fail - * (with error NOT_FOUND) if a cluster with the specified UUID does not exist. - * @param {string} [request.requestId] - * Optional. A unique ID used to identify the request. 
If the server - * receives two - * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s - * with the same id, then the second request will be ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created - * and stored in the backend is returned. - * - * Recommendation: Set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1/cluster_controller.start_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_StartCluster_async - */ - startCluster( - request?: protos.google.cloud.dataproc.v1.IStartClusterRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - startCluster( - request: protos.google.cloud.dataproc.v1.IStartClusterRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - startCluster( - request: protos.google.cloud.dataproc.v1.IStartClusterRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - startCluster( - request?: protos.google.cloud.dataproc.v1.IStartClusterRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - 'cluster_name': request.clusterName ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.startCluster(request, options, callback); - } -/** - * Check the status of the long running operation returned by `startCluster()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/cluster_controller.start_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_StartCluster_async - */ - async checkStartClusterProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.startCluster, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } -/** - * Deletes a cluster in a project. The returned - * {@link google.longrunning.Operation.metadata|Operation.metadata} will be - * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} request.clusterName - * Required. The cluster name. - * @param {string} [request.clusterUuid] - * Optional. 
Specifying the `cluster_uuid` means the RPC should fail - * (with error NOT_FOUND) if cluster with specified UUID does not exist. - * @param {string} [request.requestId] - * Optional. A unique ID used to identify the request. If the server - * receives two - * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s - * with the same id, then the second request will be ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created - * and stored in the backend is returned. - * - * It is recommended to always set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1/cluster_controller.delete_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_DeleteCluster_async - */ - deleteCluster( - request?: protos.google.cloud.dataproc.v1.IDeleteClusterRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - deleteCluster( - request: protos.google.cloud.dataproc.v1.IDeleteClusterRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - deleteCluster( - request: protos.google.cloud.dataproc.v1.IDeleteClusterRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - deleteCluster( - request?: protos.google.cloud.dataproc.v1.IDeleteClusterRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - 'cluster_name': request.clusterName ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.deleteCluster(request, options, callback); - } -/** - * Check the status of the long running operation returned by `deleteCluster()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/cluster_controller.delete_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_DeleteCluster_async - */ - async checkDeleteClusterProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.deleteCluster, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } -/** - * Gets cluster diagnostic information. The returned - * {@link google.longrunning.Operation.metadata|Operation.metadata} will be - * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - * After the operation completes, - * {@link google.longrunning.Operation.response|Operation.response} - * contains - * [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - * @param {string} request.region - * Required. 
The Dataproc region in which to handle the request. - * @param {string} request.clusterName - * Required. The cluster name. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/cluster_controller.diagnose_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_DiagnoseCluster_async - */ - diagnoseCluster( - request?: protos.google.cloud.dataproc.v1.IDiagnoseClusterRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - diagnoseCluster( - request: protos.google.cloud.dataproc.v1.IDiagnoseClusterRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - diagnoseCluster( - request: protos.google.cloud.dataproc.v1.IDiagnoseClusterRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - diagnoseCluster( - request?: protos.google.cloud.dataproc.v1.IDiagnoseClusterRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if 
(typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - 'cluster_name': request.clusterName ?? '', - }); - this.initialize(); - return this.innerApiCalls.diagnoseCluster(request, options, callback); - } -/** - * Check the status of the long running operation returned by `diagnoseCluster()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/cluster_controller.diagnose_cluster.js - * region_tag:dataproc_v1_generated_ClusterController_DiagnoseCluster_async - */ - async checkDiagnoseClusterProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.diagnoseCluster, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } - /** - * Lists all regions/{region}/clusters in a project alphabetically. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. 
The ID of the Google Cloud Platform project that the cluster - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} [request.filter] - * Optional. A filter constraining the clusters to list. Filters are - * case-sensitive and have the following syntax: - * - * field = value [AND [field = value]] ... - * - * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, - * and `[KEY]` is a label key. **value** can be `*` to match all values. - * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, - * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` - * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` - * contains the `DELETING` and `ERROR` states. - * `clusterName` is the name of the cluster provided at creation time. - * Only the logical `AND` operator is supported; space-separated items are - * treated as having an implicit `AND` operator. - * - * Example filter: - * - * status.state = ACTIVE AND clusterName = mycluster - * AND labels.env = staging AND labels.starred = * - * @param {number} [request.pageSize] - * Optional. The standard List page size. - * @param {string} [request.pageToken] - * Optional. The standard List page token. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is Array of [Cluster]{@link google.cloud.dataproc.v1.Cluster}. - * The client library will perform auto-pagination by default: it will call the API as many - * times as needed and will merge results from all the pages into this array. - * Note that it can affect your quota. - * We recommend using `listClustersAsync()` - * method described below for async iteration which you can stop as needed. 
- * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. - */ - listClusters( - request?: protos.google.cloud.dataproc.v1.IListClustersRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.ICluster[], - protos.google.cloud.dataproc.v1.IListClustersRequest|null, - protos.google.cloud.dataproc.v1.IListClustersResponse - ]>; - listClusters( - request: protos.google.cloud.dataproc.v1.IListClustersRequest, - options: CallOptions, - callback: PaginationCallback< - protos.google.cloud.dataproc.v1.IListClustersRequest, - protos.google.cloud.dataproc.v1.IListClustersResponse|null|undefined, - protos.google.cloud.dataproc.v1.ICluster>): void; - listClusters( - request: protos.google.cloud.dataproc.v1.IListClustersRequest, - callback: PaginationCallback< - protos.google.cloud.dataproc.v1.IListClustersRequest, - protos.google.cloud.dataproc.v1.IListClustersResponse|null|undefined, - protos.google.cloud.dataproc.v1.ICluster>): void; - listClusters( - request?: protos.google.cloud.dataproc.v1.IListClustersRequest, - optionsOrCallback?: CallOptions|PaginationCallback< - protos.google.cloud.dataproc.v1.IListClustersRequest, - protos.google.cloud.dataproc.v1.IListClustersResponse|null|undefined, - protos.google.cloud.dataproc.v1.ICluster>, - callback?: PaginationCallback< - protos.google.cloud.dataproc.v1.IListClustersRequest, - protos.google.cloud.dataproc.v1.IListClustersResponse|null|undefined, - protos.google.cloud.dataproc.v1.ICluster>): - Promise<[ - protos.google.cloud.dataproc.v1.ICluster[], - protos.google.cloud.dataproc.v1.IListClustersRequest|null, - protos.google.cloud.dataproc.v1.IListClustersResponse - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as 
CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - }); - this.initialize(); - return this.innerApiCalls.listClusters(request, options, callback); - } - -/** - * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} [request.filter] - * Optional. A filter constraining the clusters to list. Filters are - * case-sensitive and have the following syntax: - * - * field = value [AND [field = value]] ... - * - * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, - * and `[KEY]` is a label key. **value** can be `*` to match all values. - * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, - * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` - * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` - * contains the `DELETING` and `ERROR` states. - * `clusterName` is the name of the cluster provided at creation time. - * Only the logical `AND` operator is supported; space-separated items are - * treated as having an implicit `AND` operator. - * - * Example filter: - * - * status.state = ACTIVE AND clusterName = mycluster - * AND labels.env = staging AND labels.starred = * - * @param {number} [request.pageSize] - * Optional. The standard List page size. - * @param {string} [request.pageToken] - * Optional. The standard List page token. - * @param {object} [options] - * Call options. 
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Stream} - * An object stream which emits an object representing [Cluster]{@link google.cloud.dataproc.v1.Cluster} on 'data' event. - * The client library will perform auto-pagination by default: it will call the API as many - * times as needed. Note that it can affect your quota. - * We recommend using `listClustersAsync()` - * method described below for async iteration which you can stop as needed. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. - */ - listClustersStream( - request?: protos.google.cloud.dataproc.v1.IListClustersRequest, - options?: CallOptions): - Transform{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - }); - const defaultCallSettings = this._defaults['listClusters']; - const callSettings = defaultCallSettings.merge(options); - this.initialize(); - return this.descriptors.page.listClusters.createStream( - this.innerApiCalls.listClusters as GaxCall, - request, - callSettings - ); - } - -/** - * Equivalent to `listClusters`, but returns an iterable object. - * - * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the cluster - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} [request.filter] - * Optional. 
A filter constraining the clusters to list. Filters are - * case-sensitive and have the following syntax: - * - * field = value [AND [field = value]] ... - * - * where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, - * and `[KEY]` is a label key. **value** can be `*` to match all values. - * `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, - * `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` - * contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` - * contains the `DELETING` and `ERROR` states. - * `clusterName` is the name of the cluster provided at creation time. - * Only the logical `AND` operator is supported; space-separated items are - * treated as having an implicit `AND` operator. - * - * Example filter: - * - * status.state = ACTIVE AND clusterName = mycluster - * AND labels.env = staging AND labels.starred = * - * @param {number} [request.pageSize] - * Optional. The standard List page size. - * @param {string} [request.pageToken] - * Optional. The standard List page token. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Object} - * An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols). - * When you iterate the returned iterable, each element will be an object representing - * [Cluster]{@link google.cloud.dataproc.v1.Cluster}. The API will be called under the hood as needed, once per the page, - * so you can stop the iteration when you don't need more results. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. 
- * @example include:samples/generated/v1/cluster_controller.list_clusters.js - * region_tag:dataproc_v1_generated_ClusterController_ListClusters_async - */ - listClustersAsync( - request?: protos.google.cloud.dataproc.v1.IListClustersRequest, - options?: CallOptions): - AsyncIterable{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - }); - const defaultCallSettings = this._defaults['listClusters']; - const callSettings = defaultCallSettings.merge(options); - this.initialize(); - return this.descriptors.page.listClusters.asyncIterate( - this.innerApiCalls['listClusters'] as GaxCall, - request as {}, - callSettings - ) as AsyncIterable; - } - // -------------------- - // -- Path templates -- - // -------------------- - - /** - * Return a fully-qualified batch resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} batch - * @returns {string} Resource name string. - */ - batchPath(project:string,location:string,batch:string) { - return this.pathTemplates.batchPathTemplate.render({ - project: project, - location: location, - batch: batch, - }); - } - - /** - * Parse the project from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the project. - */ - matchProjectFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).project; - } - - /** - * Parse the location from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the location. 
- */ - matchLocationFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).location; - } - - /** - * Parse the batch from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the batch. - */ - matchBatchFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).batch; - } - - /** - * Return a fully-qualified nodeGroup resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} cluster - * @param {string} node_group - * @returns {string} Resource name string. - */ - nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { - return this.pathTemplates.nodeGroupPathTemplate.render({ - project: project, - region: region, - cluster: cluster, - node_group: nodeGroup, - }); - } - - /** - * Parse the project from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the project. - */ - matchProjectFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; - } - - /** - * Parse the region from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the region. - */ - matchRegionFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; - } - - /** - * Parse the cluster from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the cluster. 
- */ - matchClusterFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; - } - - /** - * Parse the node_group from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the node_group. - */ - matchNodeGroupFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; - } - - /** - * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} autoscaling_policy - * @returns {string} Resource name string. - */ - projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ - project: project, - location: location, - autoscaling_policy: autoscalingPolicy, - }); - } - - /** - * Parse the project from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; - } - - /** - * Parse the location from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the location. 
- */ - matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; - } - - /** - * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the autoscaling_policy. - */ - matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; - } - - /** - * Return a fully-qualified projectLocationWorkflowTemplate resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} workflow_template - * @returns {string} Resource name string. - */ - projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ - project: project, - location: location, - workflow_template: workflowTemplate, - }); - } - - /** - * Parse the project from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; - } - - /** - * Parse the location from ProjectLocationWorkflowTemplate resource. 
- * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the location. - */ - matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; - } - - /** - * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the workflow_template. - */ - matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; - } - - /** - * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} autoscaling_policy - * @returns {string} Resource name string. - */ - projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ - project: project, - region: region, - autoscaling_policy: autoscalingPolicy, - }); - } - - /** - * Parse the project from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the project. 
- */ - matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; - } - - /** - * Parse the region from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the region. - */ - matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; - } - - /** - * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the autoscaling_policy. - */ - matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; - } - - /** - * Return a fully-qualified projectRegionWorkflowTemplate resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} workflow_template - * @returns {string} Resource name string. - */ - projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ - project: project, - region: region, - workflow_template: workflowTemplate, - }); - } - - /** - * Parse the project from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. 
- * @returns {string} A string representing the project. - */ - matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; - } - - /** - * Parse the region from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the region. - */ - matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; - } - - /** - * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the workflow_template. - */ - matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; - } - - /** - * Return a fully-qualified service resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} service - * @returns {string} Resource name string. - */ - servicePath(project:string,location:string,service:string) { - return this.pathTemplates.servicePathTemplate.render({ - project: project, - location: location, - service: service, - }); - } - - /** - * Parse the project from Service resource. - * - * @param {string} serviceName - * A fully-qualified path representing Service resource. - * @returns {string} A string representing the project. 
- */ - matchProjectFromServiceName(serviceName: string) { - return this.pathTemplates.servicePathTemplate.match(serviceName).project; - } - - /** - * Parse the location from Service resource. - * - * @param {string} serviceName - * A fully-qualified path representing Service resource. - * @returns {string} A string representing the location. - */ - matchLocationFromServiceName(serviceName: string) { - return this.pathTemplates.servicePathTemplate.match(serviceName).location; - } - - /** - * Parse the service from Service resource. - * - * @param {string} serviceName - * A fully-qualified path representing Service resource. - * @returns {string} A string representing the service. - */ - matchServiceFromServiceName(serviceName: string) { - return this.pathTemplates.servicePathTemplate.match(serviceName).service; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.clusterControllerStub && !this._terminated) { - return this.clusterControllerStub.then(stub => { - this._terminated = true; - stub.close(); - this.operationsClient.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client_config.json deleted file mode 100644 index 6f5f5f3dd1d..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_client_config.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "interfaces": { - "google.cloud.dataproc.v1.ClusterController": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ], - "unavailable": [ - "UNAVAILABLE" - ], - "deadline_exceeded_internal_unavailable": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "CreateCluster": { - "timeout_millis": 300000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "UpdateCluster": { - "timeout_millis": 300000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "StopCluster": { - "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - }, - "StartCluster": { - "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - }, - "DeleteCluster": { - "timeout_millis": 300000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "GetCluster": { - "timeout_millis": 300000, - "retry_codes_name": "deadline_exceeded_internal_unavailable", - "retry_params_name": "default" - }, - "ListClusters": { - 
"timeout_millis": 300000, - "retry_codes_name": "deadline_exceeded_internal_unavailable", - "retry_params_name": "default" - }, - "DiagnoseCluster": { - "timeout_millis": 300000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_proto_list.json deleted file mode 100644 index 3bb7ccf055a..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/cluster_controller_proto_list.json +++ /dev/null @@ -1,10 +0,0 @@ -[ - "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", - "../../protos/google/cloud/dataproc/v1/batches.proto", - "../../protos/google/cloud/dataproc/v1/clusters.proto", - "../../protos/google/cloud/dataproc/v1/jobs.proto", - "../../protos/google/cloud/dataproc/v1/node_groups.proto", - "../../protos/google/cloud/dataproc/v1/operations.proto", - "../../protos/google/cloud/dataproc/v1/shared.proto", - "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" -] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/gapic_metadata.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/gapic_metadata.json deleted file mode 100644 index 36b4ef4bf5d..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/gapic_metadata.json +++ /dev/null @@ -1,453 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "typescript", - "protoPackage": "google.cloud.dataproc.v1", - "libraryPackage": "@google-cloud/dataproc", - "services": { - "AutoscalingPolicyService": { - "clients": { - "grpc": { - "libraryClient": "AutoscalingPolicyServiceClient", - "rpcs": { - "CreateAutoscalingPolicy": { - "methods": [ - "createAutoscalingPolicy" - ] - }, - "UpdateAutoscalingPolicy": { - "methods": [ - "updateAutoscalingPolicy" - ] - }, - "GetAutoscalingPolicy": { 
- "methods": [ - "getAutoscalingPolicy" - ] - }, - "DeleteAutoscalingPolicy": { - "methods": [ - "deleteAutoscalingPolicy" - ] - }, - "ListAutoscalingPolicies": { - "methods": [ - "listAutoscalingPolicies", - "listAutoscalingPoliciesStream", - "listAutoscalingPoliciesAsync" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "AutoscalingPolicyServiceClient", - "rpcs": { - "CreateAutoscalingPolicy": { - "methods": [ - "createAutoscalingPolicy" - ] - }, - "UpdateAutoscalingPolicy": { - "methods": [ - "updateAutoscalingPolicy" - ] - }, - "GetAutoscalingPolicy": { - "methods": [ - "getAutoscalingPolicy" - ] - }, - "DeleteAutoscalingPolicy": { - "methods": [ - "deleteAutoscalingPolicy" - ] - }, - "ListAutoscalingPolicies": { - "methods": [ - "listAutoscalingPolicies", - "listAutoscalingPoliciesStream", - "listAutoscalingPoliciesAsync" - ] - } - } - } - } - }, - "BatchController": { - "clients": { - "grpc": { - "libraryClient": "BatchControllerClient", - "rpcs": { - "GetBatch": { - "methods": [ - "getBatch" - ] - }, - "DeleteBatch": { - "methods": [ - "deleteBatch" - ] - }, - "CreateBatch": { - "methods": [ - "createBatch" - ] - }, - "ListBatches": { - "methods": [ - "listBatches", - "listBatchesStream", - "listBatchesAsync" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "BatchControllerClient", - "rpcs": { - "GetBatch": { - "methods": [ - "getBatch" - ] - }, - "DeleteBatch": { - "methods": [ - "deleteBatch" - ] - }, - "CreateBatch": { - "methods": [ - "createBatch" - ] - }, - "ListBatches": { - "methods": [ - "listBatches", - "listBatchesStream", - "listBatchesAsync" - ] - } - } - } - } - }, - "ClusterController": { - "clients": { - "grpc": { - "libraryClient": "ClusterControllerClient", - "rpcs": { - "GetCluster": { - "methods": [ - "getCluster" - ] - }, - "CreateCluster": { - "methods": [ - "createCluster" - ] - }, - "UpdateCluster": { - "methods": [ - "updateCluster" - ] - }, - "StopCluster": { - "methods": [ - "stopCluster" - ] - }, - "StartCluster": 
{ - "methods": [ - "startCluster" - ] - }, - "DeleteCluster": { - "methods": [ - "deleteCluster" - ] - }, - "DiagnoseCluster": { - "methods": [ - "diagnoseCluster" - ] - }, - "ListClusters": { - "methods": [ - "listClusters", - "listClustersStream", - "listClustersAsync" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "ClusterControllerClient", - "rpcs": { - "GetCluster": { - "methods": [ - "getCluster" - ] - }, - "CreateCluster": { - "methods": [ - "createCluster" - ] - }, - "UpdateCluster": { - "methods": [ - "updateCluster" - ] - }, - "StopCluster": { - "methods": [ - "stopCluster" - ] - }, - "StartCluster": { - "methods": [ - "startCluster" - ] - }, - "DeleteCluster": { - "methods": [ - "deleteCluster" - ] - }, - "DiagnoseCluster": { - "methods": [ - "diagnoseCluster" - ] - }, - "ListClusters": { - "methods": [ - "listClusters", - "listClustersStream", - "listClustersAsync" - ] - } - } - } - } - }, - "JobController": { - "clients": { - "grpc": { - "libraryClient": "JobControllerClient", - "rpcs": { - "SubmitJob": { - "methods": [ - "submitJob" - ] - }, - "GetJob": { - "methods": [ - "getJob" - ] - }, - "UpdateJob": { - "methods": [ - "updateJob" - ] - }, - "CancelJob": { - "methods": [ - "cancelJob" - ] - }, - "DeleteJob": { - "methods": [ - "deleteJob" - ] - }, - "SubmitJobAsOperation": { - "methods": [ - "submitJobAsOperation" - ] - }, - "ListJobs": { - "methods": [ - "listJobs", - "listJobsStream", - "listJobsAsync" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "JobControllerClient", - "rpcs": { - "SubmitJob": { - "methods": [ - "submitJob" - ] - }, - "GetJob": { - "methods": [ - "getJob" - ] - }, - "UpdateJob": { - "methods": [ - "updateJob" - ] - }, - "CancelJob": { - "methods": [ - "cancelJob" - ] - }, - "DeleteJob": { - "methods": [ - "deleteJob" - ] - }, - "SubmitJobAsOperation": { - "methods": [ - "submitJobAsOperation" - ] - }, - "ListJobs": { - "methods": [ - "listJobs", - "listJobsStream", - "listJobsAsync" - ] - } - } - } - } - 
}, - "NodeGroupController": { - "clients": { - "grpc": { - "libraryClient": "NodeGroupControllerClient", - "rpcs": { - "GetNodeGroup": { - "methods": [ - "getNodeGroup" - ] - }, - "CreateNodeGroup": { - "methods": [ - "createNodeGroup" - ] - }, - "ResizeNodeGroup": { - "methods": [ - "resizeNodeGroup" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "NodeGroupControllerClient", - "rpcs": { - "GetNodeGroup": { - "methods": [ - "getNodeGroup" - ] - }, - "CreateNodeGroup": { - "methods": [ - "createNodeGroup" - ] - }, - "ResizeNodeGroup": { - "methods": [ - "resizeNodeGroup" - ] - } - } - } - } - }, - "WorkflowTemplateService": { - "clients": { - "grpc": { - "libraryClient": "WorkflowTemplateServiceClient", - "rpcs": { - "CreateWorkflowTemplate": { - "methods": [ - "createWorkflowTemplate" - ] - }, - "GetWorkflowTemplate": { - "methods": [ - "getWorkflowTemplate" - ] - }, - "UpdateWorkflowTemplate": { - "methods": [ - "updateWorkflowTemplate" - ] - }, - "DeleteWorkflowTemplate": { - "methods": [ - "deleteWorkflowTemplate" - ] - }, - "InstantiateWorkflowTemplate": { - "methods": [ - "instantiateWorkflowTemplate" - ] - }, - "InstantiateInlineWorkflowTemplate": { - "methods": [ - "instantiateInlineWorkflowTemplate" - ] - }, - "ListWorkflowTemplates": { - "methods": [ - "listWorkflowTemplates", - "listWorkflowTemplatesStream", - "listWorkflowTemplatesAsync" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "WorkflowTemplateServiceClient", - "rpcs": { - "CreateWorkflowTemplate": { - "methods": [ - "createWorkflowTemplate" - ] - }, - "GetWorkflowTemplate": { - "methods": [ - "getWorkflowTemplate" - ] - }, - "UpdateWorkflowTemplate": { - "methods": [ - "updateWorkflowTemplate" - ] - }, - "DeleteWorkflowTemplate": { - "methods": [ - "deleteWorkflowTemplate" - ] - }, - "InstantiateWorkflowTemplate": { - "methods": [ - "instantiateWorkflowTemplate" - ] - }, - "InstantiateInlineWorkflowTemplate": { - "methods": [ - "instantiateInlineWorkflowTemplate" - ] - }, - 
"ListWorkflowTemplates": { - "methods": [ - "listWorkflowTemplates", - "listWorkflowTemplatesStream", - "listWorkflowTemplatesAsync" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/index.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/index.ts deleted file mode 100644 index 10f41804708..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/index.ts +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -export {AutoscalingPolicyServiceClient} from './autoscaling_policy_service_client'; -export {BatchControllerClient} from './batch_controller_client'; -export {ClusterControllerClient} from './cluster_controller_client'; -export {JobControllerClient} from './job_controller_client'; -export {NodeGroupControllerClient} from './node_group_controller_client'; -export {WorkflowTemplateServiceClient} from './workflow_template_service_client'; diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client.ts deleted file mode 100644 index cf8f723808b..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client.ts +++ /dev/null @@ -1,1465 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -/* global window */ -import type * as gax from 'google-gax'; -import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation, PaginationCallback, GaxCall} from 'google-gax'; -import {Transform} from 'stream'; -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1/job_controller_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './job_controller_client_config.json'; -const version = require('../../../package.json').version; - -/** - * The JobController provides methods to manage jobs. - * @class - * @memberof v1 - */ -export class JobControllerClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - pathTemplates: {[name: string]: gax.PathTemplate}; - operationsClient: gax.OperationsClient; - jobControllerStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of JobControllerClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. 
- * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you - * need to avoid loading the default gRPC version and want to use the fallback - * HTTP implementation. 
Load only fallback version and pass it to the constructor: - * ``` - * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC - * const client = new JobControllerClient({fallback: 'rest'}, gax); - * ``` - */ - constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof JobControllerClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // Request numeric enum values if REST transport is used. - opts.numericEnums = true; - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Load google-gax module synchronously if needed - if (!gaxInstance) { - gaxInstance = require('google-gax') as typeof gax; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. 
- this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // This API contains "path templates"; forward-slash-separated - // identifiers to uniquely identify resources within the API. - // Create useful helper objects for these. 
- this.pathTemplates = { - batchPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/batches/{batch}' - ), - nodeGroupPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' - ), - projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' - ), - projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' - ), - projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' - ), - projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' - ), - }; - - // Some of the methods on this service return "paged" results, - // (e.g. 50 results at a time, with tokens to get subsequent - // pages). Denote the keys used for pagination and results. - this.descriptors.page = { - listJobs: - new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'jobs') - }; - - const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a - // an Operation object that allows for tracking of the operation, - // rather than holding a request open. - const lroOptions: GrpcClientOptions = { - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined - }; - if (opts.fallback === 'rest') { - lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = [{selector: 'google.iam.v1.IAMPolicy.GetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy',body: '*',}], - },{selector: 'google.iam.v1.IAMPolicy.SetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy',body: '*',}], - },{selector: 'google.iam.v1.IAMPolicy.TestIamPermissions',post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: 
'/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions',body: '*',}], - },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/regions/*/operations/*}:cancel',},{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/regions/*/operations}',}]; - } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); - const submitJobAsOperationResponse = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.Job') as gax.protobuf.Type; - const submitJobAsOperationMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.JobMetadata') as gax.protobuf.Type; - - this.descriptors.longrunning = { - submitJobAsOperation: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - submitJobAsOperationResponse.decode.bind(submitJobAsOperationResponse), - submitJobAsOperationMetadata.decode.bind(submitJobAsOperationMetadata)) - }; - - // Put together the default options sent with requests. - this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.dataproc.v1.JobController', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. 
- this.warn = this._gaxModule.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.jobControllerStub) { - return this.jobControllerStub; - } - - // Put together the "service stub" for - // google.cloud.dataproc.v1.JobController. - this.jobControllerStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.JobController') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.dataproc.v1.JobController, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
- const jobControllerStubMethods = - ['submitJob', 'submitJobAsOperation', 'getJob', 'listJobs', 'updateJob', 'cancelJob', 'deleteJob']; - for (const methodName of jobControllerStubMethods) { - const callPromise = this.jobControllerStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.page[methodName] || - this.descriptors.longrunning[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor, - this._opts.fallback - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.jobControllerStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'dataproc.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'dataproc.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. 
- */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- -/** - * Submits a job to a cluster. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {google.cloud.dataproc.v1.Job} request.job - * Required. The job resource. - * @param {string} [request.requestId] - * Optional. A unique id used to identify the request. If the server - * receives two - * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s - * with the same id, then the second request will be ignored and the - * first {@link google.cloud.dataproc.v1.Job|Job} created and stored in the backend - * is returned. - * - * It is recommended to always set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The id must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Job]{@link google.cloud.dataproc.v1.Job}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/job_controller.submit_job.js - * region_tag:dataproc_v1_generated_JobController_SubmitJob_async - */ - submitJob( - request?: protos.google.cloud.dataproc.v1.ISubmitJobRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ISubmitJobRequest|undefined, {}|undefined - ]>; - submitJob( - request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ISubmitJobRequest|null|undefined, - {}|null|undefined>): void; - submitJob( - request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ISubmitJobRequest|null|undefined, - {}|null|undefined>): void; - submitJob( - request?: protos.google.cloud.dataproc.v1.ISubmitJobRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ISubmitJobRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ISubmitJobRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ISubmitJobRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.submitJob(request, options, callback); - } -/** - * Gets the resource representation for a job in a project. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} request.jobId - * Required. The job ID. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Job]{@link google.cloud.dataproc.v1.Job}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/job_controller.get_job.js - * region_tag:dataproc_v1_generated_JobController_GetJob_async - */ - getJob( - request?: protos.google.cloud.dataproc.v1.IGetJobRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IGetJobRequest|undefined, {}|undefined - ]>; - getJob( - request: protos.google.cloud.dataproc.v1.IGetJobRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IGetJobRequest|null|undefined, - {}|null|undefined>): void; - getJob( - request: protos.google.cloud.dataproc.v1.IGetJobRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IGetJobRequest|null|undefined, - {}|null|undefined>): void; - getJob( - request?: protos.google.cloud.dataproc.v1.IGetJobRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IGetJobRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IGetJobRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IGetJobRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - 'job_id': request.jobId ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.getJob(request, options, callback); - } -/** - * Updates a job in a project. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} request.jobId - * Required. The job ID. - * @param {google.cloud.dataproc.v1.Job} request.job - * Required. The changes to the job. - * @param {google.protobuf.FieldMask} request.updateMask - * Required. Specifies the path, relative to Job, of - * the field to update. For example, to update the labels of a Job the - * update_mask parameter would be specified as - * labels, and the `PATCH` request body would specify the new - * value. Note: Currently, labels is the only - * field that can be updated. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Job]{@link google.cloud.dataproc.v1.Job}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/job_controller.update_job.js - * region_tag:dataproc_v1_generated_JobController_UpdateJob_async - */ - updateJob( - request?: protos.google.cloud.dataproc.v1.IUpdateJobRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IUpdateJobRequest|undefined, {}|undefined - ]>; - updateJob( - request: protos.google.cloud.dataproc.v1.IUpdateJobRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IUpdateJobRequest|null|undefined, - {}|null|undefined>): void; - updateJob( - request: protos.google.cloud.dataproc.v1.IUpdateJobRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IUpdateJobRequest|null|undefined, - {}|null|undefined>): void; - updateJob( - request?: protos.google.cloud.dataproc.v1.IUpdateJobRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IUpdateJobRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IUpdateJobRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.IUpdateJobRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? 
'', - 'job_id': request.jobId ?? '', - }); - this.initialize(); - return this.innerApiCalls.updateJob(request, options, callback); - } -/** - * Starts a job cancellation request. To access the job resource - * after cancellation, call - * [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) - * or - * [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} request.jobId - * Required. The job ID. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Job]{@link google.cloud.dataproc.v1.Job}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/job_controller.cancel_job.js - * region_tag:dataproc_v1_generated_JobController_CancelJob_async - */ - cancelJob( - request?: protos.google.cloud.dataproc.v1.ICancelJobRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ICancelJobRequest|undefined, {}|undefined - ]>; - cancelJob( - request: protos.google.cloud.dataproc.v1.ICancelJobRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ICancelJobRequest|null|undefined, - {}|null|undefined>): void; - cancelJob( - request: protos.google.cloud.dataproc.v1.ICancelJobRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ICancelJobRequest|null|undefined, - {}|null|undefined>): void; - cancelJob( - request?: protos.google.cloud.dataproc.v1.ICancelJobRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ICancelJobRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ICancelJobRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IJob, - protos.google.cloud.dataproc.v1.ICancelJobRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? 
'', - 'job_id': request.jobId ?? '', - }); - this.initialize(); - return this.innerApiCalls.cancelJob(request, options, callback); - } -/** - * Deletes the job from the project. If the job is active, the delete fails, - * and the response returns `FAILED_PRECONDITION`. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {string} request.jobId - * Required. The job ID. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/job_controller.delete_job.js - * region_tag:dataproc_v1_generated_JobController_DeleteJob_async - */ - deleteJob( - request?: protos.google.cloud.dataproc.v1.IDeleteJobRequest, - options?: CallOptions): - Promise<[ - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteJobRequest|undefined, {}|undefined - ]>; - deleteJob( - request: protos.google.cloud.dataproc.v1.IDeleteJobRequest, - options: CallOptions, - callback: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteJobRequest|null|undefined, - {}|null|undefined>): void; - deleteJob( - request: protos.google.cloud.dataproc.v1.IDeleteJobRequest, - callback: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteJobRequest|null|undefined, - {}|null|undefined>): void; - deleteJob( - request?: protos.google.cloud.dataproc.v1.IDeleteJobRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteJobRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteJobRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteJobRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - 'job_id': request.jobId ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.deleteJob(request, options, callback); - } - -/** - * Submits job to a cluster. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {google.cloud.dataproc.v1.Job} request.job - * Required. The job resource. - * @param {string} [request.requestId] - * Optional. A unique id used to identify the request. If the server - * receives two - * [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s - * with the same id, then the second request will be ignored and the - * first {@link google.cloud.dataproc.v1.Job|Job} created and stored in the backend - * is returned. - * - * It is recommended to always set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The id must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1/job_controller.submit_job_as_operation.js - * region_tag:dataproc_v1_generated_JobController_SubmitJobAsOperation_async - */ - submitJobAsOperation( - request?: protos.google.cloud.dataproc.v1.ISubmitJobRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - submitJobAsOperation( - request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - submitJobAsOperation( - request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - submitJobAsOperation( - request?: protos.google.cloud.dataproc.v1.ISubmitJobRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.submitJobAsOperation(request, options, callback); - } -/** - * Check the status of the long running operation returned by `submitJobAsOperation()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/job_controller.submit_job_as_operation.js - * region_tag:dataproc_v1_generated_JobController_SubmitJobAsOperation_async - */ - async checkSubmitJobAsOperationProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.submitJobAsOperation, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } - /** - * Lists regions/{region}/jobs in a project. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {number} [request.pageSize] - * Optional. The number of results to return in each response. - * @param {string} [request.pageToken] - * Optional. The page token, returned by a previous call, to request the - * next page of results. - * @param {string} [request.clusterName] - * Optional. If set, the returned jobs list includes only jobs that were - * submitted to the named cluster. 
- * @param {google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher} [request.jobStateMatcher] - * Optional. Specifies enumerated categories of jobs to list. - * (default = match ALL jobs). - * - * If `filter` is provided, `jobStateMatcher` will be ignored. - * @param {string} [request.filter] - * Optional. A filter constraining the jobs to list. Filters are - * case-sensitive and have the following syntax: - * - * [field = value] AND [field [= value]] ... - * - * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label - * key. **value** can be `*` to match all values. - * `status.state` can be either `ACTIVE` or `NON_ACTIVE`. - * Only the logical `AND` operator is supported; space-separated items are - * treated as having an implicit `AND` operator. - * - * Example filter: - * - * status.state = ACTIVE AND labels.env = staging AND labels.starred = * - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is Array of [Job]{@link google.cloud.dataproc.v1.Job}. - * The client library will perform auto-pagination by default: it will call the API as many - * times as needed and will merge results from all the pages into this array. - * Note that it can affect your quota. - * We recommend using `listJobsAsync()` - * method described below for async iteration which you can stop as needed. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. 
- */ - listJobs( - request?: protos.google.cloud.dataproc.v1.IListJobsRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IJob[], - protos.google.cloud.dataproc.v1.IListJobsRequest|null, - protos.google.cloud.dataproc.v1.IListJobsResponse - ]>; - listJobs( - request: protos.google.cloud.dataproc.v1.IListJobsRequest, - options: CallOptions, - callback: PaginationCallback< - protos.google.cloud.dataproc.v1.IListJobsRequest, - protos.google.cloud.dataproc.v1.IListJobsResponse|null|undefined, - protos.google.cloud.dataproc.v1.IJob>): void; - listJobs( - request: protos.google.cloud.dataproc.v1.IListJobsRequest, - callback: PaginationCallback< - protos.google.cloud.dataproc.v1.IListJobsRequest, - protos.google.cloud.dataproc.v1.IListJobsResponse|null|undefined, - protos.google.cloud.dataproc.v1.IJob>): void; - listJobs( - request?: protos.google.cloud.dataproc.v1.IListJobsRequest, - optionsOrCallback?: CallOptions|PaginationCallback< - protos.google.cloud.dataproc.v1.IListJobsRequest, - protos.google.cloud.dataproc.v1.IListJobsResponse|null|undefined, - protos.google.cloud.dataproc.v1.IJob>, - callback?: PaginationCallback< - protos.google.cloud.dataproc.v1.IListJobsRequest, - protos.google.cloud.dataproc.v1.IListJobsResponse|null|undefined, - protos.google.cloud.dataproc.v1.IJob>): - Promise<[ - protos.google.cloud.dataproc.v1.IJob[], - protos.google.cloud.dataproc.v1.IListJobsRequest|null, - protos.google.cloud.dataproc.v1.IListJobsResponse - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 
'project_id': request.projectId ?? '', - 'region': request.region ?? '', - }); - this.initialize(); - return this.innerApiCalls.listJobs(request, options, callback); - } - -/** - * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {number} [request.pageSize] - * Optional. The number of results to return in each response. - * @param {string} [request.pageToken] - * Optional. The page token, returned by a previous call, to request the - * next page of results. - * @param {string} [request.clusterName] - * Optional. If set, the returned jobs list includes only jobs that were - * submitted to the named cluster. - * @param {google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher} [request.jobStateMatcher] - * Optional. Specifies enumerated categories of jobs to list. - * (default = match ALL jobs). - * - * If `filter` is provided, `jobStateMatcher` will be ignored. - * @param {string} [request.filter] - * Optional. A filter constraining the jobs to list. Filters are - * case-sensitive and have the following syntax: - * - * [field = value] AND [field [= value]] ... - * - * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label - * key. **value** can be `*` to match all values. - * `status.state` can be either `ACTIVE` or `NON_ACTIVE`. - * Only the logical `AND` operator is supported; space-separated items are - * treated as having an implicit `AND` operator. - * - * Example filter: - * - * status.state = ACTIVE AND labels.env = staging AND labels.starred = * - * @param {object} [options] - * Call options. 
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Stream} - * An object stream which emits an object representing [Job]{@link google.cloud.dataproc.v1.Job} on 'data' event. - * The client library will perform auto-pagination by default: it will call the API as many - * times as needed. Note that it can affect your quota. - * We recommend using `listJobsAsync()` - * method described below for async iteration which you can stop as needed. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. - */ - listJobsStream( - request?: protos.google.cloud.dataproc.v1.IListJobsRequest, - options?: CallOptions): - Transform{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - }); - const defaultCallSettings = this._defaults['listJobs']; - const callSettings = defaultCallSettings.merge(options); - this.initialize(); - return this.descriptors.page.listJobs.createStream( - this.innerApiCalls.listJobs as GaxCall, - request, - callSettings - ); - } - -/** - * Equivalent to `listJobs`, but returns an iterable object. - * - * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. - * @param {Object} request - * The request object that will be sent. - * @param {string} request.projectId - * Required. The ID of the Google Cloud Platform project that the job - * belongs to. - * @param {string} request.region - * Required. The Dataproc region in which to handle the request. - * @param {number} [request.pageSize] - * Optional. The number of results to return in each response. 
- * @param {string} [request.pageToken] - * Optional. The page token, returned by a previous call, to request the - * next page of results. - * @param {string} [request.clusterName] - * Optional. If set, the returned jobs list includes only jobs that were - * submitted to the named cluster. - * @param {google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher} [request.jobStateMatcher] - * Optional. Specifies enumerated categories of jobs to list. - * (default = match ALL jobs). - * - * If `filter` is provided, `jobStateMatcher` will be ignored. - * @param {string} [request.filter] - * Optional. A filter constraining the jobs to list. Filters are - * case-sensitive and have the following syntax: - * - * [field = value] AND [field [= value]] ... - * - * where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label - * key. **value** can be `*` to match all values. - * `status.state` can be either `ACTIVE` or `NON_ACTIVE`. - * Only the logical `AND` operator is supported; space-separated items are - * treated as having an implicit `AND` operator. - * - * Example filter: - * - * status.state = ACTIVE AND labels.env = staging AND labels.starred = * - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Object} - * An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols). - * When you iterate the returned iterable, each element will be an object representing - * [Job]{@link google.cloud.dataproc.v1.Job}. The API will be called under the hood as needed, once per the page, - * so you can stop the iteration when you don't need more results. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. 
- * @example include:samples/generated/v1/job_controller.list_jobs.js - * region_tag:dataproc_v1_generated_JobController_ListJobs_async - */ - listJobsAsync( - request?: protos.google.cloud.dataproc.v1.IListJobsRequest, - options?: CallOptions): - AsyncIterable{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'project_id': request.projectId ?? '', - 'region': request.region ?? '', - }); - const defaultCallSettings = this._defaults['listJobs']; - const callSettings = defaultCallSettings.merge(options); - this.initialize(); - return this.descriptors.page.listJobs.asyncIterate( - this.innerApiCalls['listJobs'] as GaxCall, - request as {}, - callSettings - ) as AsyncIterable; - } - // -------------------- - // -- Path templates -- - // -------------------- - - /** - * Return a fully-qualified batch resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} batch - * @returns {string} Resource name string. - */ - batchPath(project:string,location:string,batch:string) { - return this.pathTemplates.batchPathTemplate.render({ - project: project, - location: location, - batch: batch, - }); - } - - /** - * Parse the project from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the project. - */ - matchProjectFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).project; - } - - /** - * Parse the location from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the location. 
- */ - matchLocationFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).location; - } - - /** - * Parse the batch from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the batch. - */ - matchBatchFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).batch; - } - - /** - * Return a fully-qualified nodeGroup resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} cluster - * @param {string} node_group - * @returns {string} Resource name string. - */ - nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { - return this.pathTemplates.nodeGroupPathTemplate.render({ - project: project, - region: region, - cluster: cluster, - node_group: nodeGroup, - }); - } - - /** - * Parse the project from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the project. - */ - matchProjectFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; - } - - /** - * Parse the region from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the region. - */ - matchRegionFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; - } - - /** - * Parse the cluster from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the cluster. 
- */ - matchClusterFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; - } - - /** - * Parse the node_group from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the node_group. - */ - matchNodeGroupFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; - } - - /** - * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} autoscaling_policy - * @returns {string} Resource name string. - */ - projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ - project: project, - location: location, - autoscaling_policy: autoscalingPolicy, - }); - } - - /** - * Parse the project from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; - } - - /** - * Parse the location from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the location. 
- */ - matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; - } - - /** - * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the autoscaling_policy. - */ - matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; - } - - /** - * Return a fully-qualified projectLocationWorkflowTemplate resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} workflow_template - * @returns {string} Resource name string. - */ - projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ - project: project, - location: location, - workflow_template: workflowTemplate, - }); - } - - /** - * Parse the project from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; - } - - /** - * Parse the location from ProjectLocationWorkflowTemplate resource. 
- * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the location. - */ - matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; - } - - /** - * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the workflow_template. - */ - matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; - } - - /** - * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} autoscaling_policy - * @returns {string} Resource name string. - */ - projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ - project: project, - region: region, - autoscaling_policy: autoscalingPolicy, - }); - } - - /** - * Parse the project from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the project. 
- */ - matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; - } - - /** - * Parse the region from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the region. - */ - matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; - } - - /** - * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the autoscaling_policy. - */ - matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; - } - - /** - * Return a fully-qualified projectRegionWorkflowTemplate resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} workflow_template - * @returns {string} Resource name string. - */ - projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ - project: project, - region: region, - workflow_template: workflowTemplate, - }); - } - - /** - * Parse the project from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. 
- * @returns {string} A string representing the project. - */ - matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; - } - - /** - * Parse the region from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the region. - */ - matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; - } - - /** - * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the workflow_template. - */ - matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.jobControllerStub && !this._terminated) { - return this.jobControllerStub.then(stub => { - this._terminated = true; - stub.close(); - this.operationsClient.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client_config.json deleted file mode 100644 index 5d75711034e..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_client_config.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "interfaces": { - "google.cloud.dataproc.v1.JobController": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ], - "unavailable": [ - "UNAVAILABLE" - ], - "deadline_exceeded_internal_unavailable": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "SubmitJob": { - "timeout_millis": 900000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "SubmitJobAsOperation": { - "timeout_millis": 900000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "GetJob": { - "timeout_millis": 900000, - "retry_codes_name": "deadline_exceeded_internal_unavailable", - "retry_params_name": "default" - }, - "ListJobs": { - "timeout_millis": 900000, - "retry_codes_name": "deadline_exceeded_internal_unavailable", - "retry_params_name": "default" - }, - "UpdateJob": { - "timeout_millis": 900000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "CancelJob": { - "timeout_millis": 900000, - "retry_codes_name": "deadline_exceeded_internal_unavailable", - 
"retry_params_name": "default" - }, - "DeleteJob": { - "timeout_millis": 900000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_proto_list.json deleted file mode 100644 index 3bb7ccf055a..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/job_controller_proto_list.json +++ /dev/null @@ -1,10 +0,0 @@ -[ - "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", - "../../protos/google/cloud/dataproc/v1/batches.proto", - "../../protos/google/cloud/dataproc/v1/clusters.proto", - "../../protos/google/cloud/dataproc/v1/jobs.proto", - "../../protos/google/cloud/dataproc/v1/node_groups.proto", - "../../protos/google/cloud/dataproc/v1/operations.proto", - "../../protos/google/cloud/dataproc/v1/shared.proto", - "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" -] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client.ts b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client.ts deleted file mode 100644 index ab191a6ed7d..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client.ts +++ /dev/null @@ -1,1561 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import type * as gax from 'google-gax'; -import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation, PaginationCallback, GaxCall} from 'google-gax'; -import {Transform} from 'stream'; -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1/workflow_template_service_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './workflow_template_service_client_config.json'; -const version = require('../../../package.json').version; - -/** - * The API interface for managing Workflow Templates in the - * Dataproc API. - * @class - * @memberof v1 - */ -export class WorkflowTemplateServiceClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - pathTemplates: {[name: string]: gax.PathTemplate}; - operationsClient: gax.OperationsClient; - workflowTemplateServiceStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of WorkflowTemplateServiceClient. - * - * @param {object} [options] - The configuration object. 
- * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - * @param {gax} [gaxInstance]: loaded instance of `google-gax`. 
Useful if you - * need to avoid loading the default gRPC version and want to use the fallback - * HTTP implementation. Load only fallback version and pass it to the constructor: - * ``` - * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC - * const client = new WorkflowTemplateServiceClient({fallback: 'rest'}, gax); - * ``` - */ - constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof WorkflowTemplateServiceClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // Request numeric enum values if REST transport is used. - opts.numericEnums = true; - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Load google-gax module synchronously if needed - if (!gaxInstance) { - gaxInstance = require('google-gax') as typeof gax; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. 
- this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // This API contains "path templates"; forward-slash-separated - // identifiers to uniquely identify resources within the API. - // Create useful helper objects for these. 
- this.pathTemplates = { - batchPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/batches/{batch}' - ), - nodeGroupPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' - ), - projectPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}' - ), - projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' - ), - projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' - ), - projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' - ), - projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' - ), - regionPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}' - ), - }; - - // Some of the methods on this service return "paged" results, - // (e.g. 50 results at a time, with tokens to get subsequent - // pages). Denote the keys used for pagination and results. - this.descriptors.page = { - listWorkflowTemplates: - new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'templates') - }; - - const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a - // an Operation object that allows for tracking of the operation, - // rather than holding a request open. - const lroOptions: GrpcClientOptions = { - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined - }; - if (opts.fallback === 'rest') { - lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = [{selector: 'google.iam.v1.IAMPolicy.GetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy',body: '*',}], - },{selector: 'google.iam.v1.IAMPolicy.SetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy',body: '*',}], - },{selector: 'google.iam.v1.IAMPolicy.TestIamPermissions',post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: 
'/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions',body: '*',}], - },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/regions/*/operations/*}:cancel',},{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/regions/*/operations}',}]; - } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); - const instantiateWorkflowTemplateResponse = protoFilesRoot.lookup( - '.google.protobuf.Empty') as gax.protobuf.Type; - const instantiateWorkflowTemplateMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.WorkflowMetadata') as gax.protobuf.Type; - const instantiateInlineWorkflowTemplateResponse = protoFilesRoot.lookup( - '.google.protobuf.Empty') as gax.protobuf.Type; - const instantiateInlineWorkflowTemplateMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.WorkflowMetadata') as gax.protobuf.Type; - - this.descriptors.longrunning = { - instantiateWorkflowTemplate: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - instantiateWorkflowTemplateResponse.decode.bind(instantiateWorkflowTemplateResponse), - instantiateWorkflowTemplateMetadata.decode.bind(instantiateWorkflowTemplateMetadata)), - instantiateInlineWorkflowTemplate: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - instantiateInlineWorkflowTemplateResponse.decode.bind(instantiateInlineWorkflowTemplateResponse), - instantiateInlineWorkflowTemplateMetadata.decode.bind(instantiateInlineWorkflowTemplateMetadata)) - }; - - // 
Put together the default options sent with requests. - this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.dataproc.v1.WorkflowTemplateService', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = this._gaxModule.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.workflowTemplateServiceStub) { - return this.workflowTemplateServiceStub; - } - - // Put together the "service stub" for - // google.cloud.dataproc.v1.WorkflowTemplateService. - this.workflowTemplateServiceStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.WorkflowTemplateService') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.dataproc.v1.WorkflowTemplateService, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
- const workflowTemplateServiceStubMethods = - ['createWorkflowTemplate', 'getWorkflowTemplate', 'instantiateWorkflowTemplate', 'instantiateInlineWorkflowTemplate', 'updateWorkflowTemplate', 'listWorkflowTemplates', 'deleteWorkflowTemplate']; - for (const methodName of workflowTemplateServiceStubMethods) { - const callPromise = this.workflowTemplateServiceStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.page[methodName] || - this.descriptors.longrunning[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor, - this._opts.fallback - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.workflowTemplateServiceStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'dataproc.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'dataproc.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. 
- * @returns {Promise} A promise that resolves to string containing the project ID. - */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- -/** - * Creates new workflow template. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The resource name of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.workflowTemplates.create`, the resource name of the - * region has the following format: - * `projects/{project_id}/regions/{region}` - * - * * For `projects.locations.workflowTemplates.create`, the resource name of - * the location has the following format: - * `projects/{project_id}/locations/{location}` - * @param {google.cloud.dataproc.v1.WorkflowTemplate} request.template - * Required. The Dataproc workflow template to create. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/workflow_template_service.create_workflow_template.js - * region_tag:dataproc_v1_generated_WorkflowTemplateService_CreateWorkflowTemplate_async - */ - createWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|undefined, {}|undefined - ]>; - createWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): void; - createWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): void; - createWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.ICreateWorkflowTemplateRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - 
options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - this.initialize(); - return this.innerApiCalls.createWorkflowTemplate(request, options, callback); - } -/** - * Retrieves the latest workflow template. - * - * Can retrieve previously instantiated template by specifying optional - * version parameter. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. The resource name of the workflow template, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.workflowTemplates.get`, the resource name of the - * template has the following format: - * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - * - * * For `projects.locations.workflowTemplates.get`, the resource name of the - * template has the following format: - * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - * @param {number} [request.version] - * Optional. The version of workflow template to retrieve. Only previously - * instantiated versions can be retrieved. - * - * If unspecified, retrieves the current version. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/workflow_template_service.get_workflow_template.js - * region_tag:dataproc_v1_generated_WorkflowTemplateService_GetWorkflowTemplate_async - */ - getWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|undefined, {}|undefined - ]>; - getWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): void; - getWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): void; - getWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IGetWorkflowTemplateRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - 
options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'name': request.name ?? '', - }); - this.initialize(); - return this.innerApiCalls.getWorkflowTemplate(request, options, callback); - } -/** - * Updates (replaces) workflow template. The updated template - * must contain version that matches the current server version. - * - * @param {Object} request - * The request object that will be sent. - * @param {google.cloud.dataproc.v1.WorkflowTemplate} request.template - * Required. The updated workflow template. - * - * The `template.version` field must match the current version. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/workflow_template_service.update_workflow_template.js - * region_tag:dataproc_v1_generated_WorkflowTemplateService_UpdateWorkflowTemplate_async - */ - updateWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|undefined, {}|undefined - ]>; - updateWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): void; - updateWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): void; - updateWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.IWorkflowTemplate, - protos.google.cloud.dataproc.v1.IUpdateWorkflowTemplateRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - 
options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'template.name': request.template!.name ?? '', - }); - this.initialize(); - return this.innerApiCalls.updateWorkflowTemplate(request, options, callback); - } -/** - * Deletes a workflow template. It does not cancel in-progress workflows. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. The resource name of the workflow template, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.workflowTemplates.delete`, the resource name - * of the template has the following format: - * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - * - * * For `projects.locations.workflowTemplates.instantiate`, the resource name - * of the template has the following format: - * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - * @param {number} [request.version] - * Optional. The version of workflow template to delete. If specified, - * will only delete the template if the current server version matches - * specified version. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1/workflow_template_service.delete_workflow_template.js - * region_tag:dataproc_v1_generated_WorkflowTemplateService_DeleteWorkflowTemplate_async - */ - deleteWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest, - options?: CallOptions): - Promise<[ - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|undefined, {}|undefined - ]>; - deleteWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest, - options: CallOptions, - callback: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): void; - deleteWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest, - callback: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): void; - deleteWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.protobuf.IEmpty, - protos.google.cloud.dataproc.v1.IDeleteWorkflowTemplateRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - 
options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'name': request.name ?? '', - }); - this.initialize(); - return this.innerApiCalls.deleteWorkflowTemplate(request, options, callback); - } - -/** - * Instantiates a template and begins execution. - * - * The returned Operation can be used to track execution of - * workflow by polling - * {@link google.longrunning.Operations.GetOperation|operations.get}. - * The Operation will complete when entire workflow is finished. - * - * The running workflow can be aborted via - * {@link google.longrunning.Operations.CancelOperation|operations.cancel}. - * This will cause any inflight jobs to be cancelled and workflow-owned - * clusters to be deleted. - * - * The {@link google.longrunning.Operation.metadata|Operation.metadata} will be - * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). - * Also see [Using - * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). - * - * On successful completion, - * {@link google.longrunning.Operation.response|Operation.response} will be - * {@link google.protobuf.Empty|Empty}. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. The resource name of the workflow template, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.workflowTemplates.instantiate`, the resource name - * of the template has the following format: - * `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` - * - * * For `projects.locations.workflowTemplates.instantiate`, the resource name - * of the template has the following format: - * `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}` - * @param {number} [request.version] - * Optional. The version of workflow template to instantiate. 
If specified, - * the workflow will be instantiated only if the current version of - * the workflow template has the supplied version. - * - * This option cannot be used to instantiate a previous version of - * workflow template. - * @param {string} [request.requestId] - * Optional. A tag that prevents multiple concurrent workflow - * instances with the same tag from running. This mitigates risk of - * concurrent instances started due to retries. - * - * It is recommended to always set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The tag must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {number[]} [request.parameters] - * Optional. Map from parameter names to values that should be used for those - * parameters. Values may not exceed 1000 characters. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1/workflow_template_service.instantiate_workflow_template.js - * region_tag:dataproc_v1_generated_WorkflowTemplateService_InstantiateWorkflowTemplate_async - */ - instantiateWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.IInstantiateWorkflowTemplateRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - instantiateWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.IInstantiateWorkflowTemplateRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - instantiateWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.IInstantiateWorkflowTemplateRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - instantiateWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.IInstantiateWorkflowTemplateRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'name': request.name ?? 
'', - }); - this.initialize(); - return this.innerApiCalls.instantiateWorkflowTemplate(request, options, callback); - } -/** - * Check the status of the long running operation returned by `instantiateWorkflowTemplate()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/workflow_template_service.instantiate_workflow_template.js - * region_tag:dataproc_v1_generated_WorkflowTemplateService_InstantiateWorkflowTemplate_async - */ - async checkInstantiateWorkflowTemplateProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.instantiateWorkflowTemplate, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } -/** - * Instantiates a template and begins execution. - * - * This method is equivalent to executing the sequence - * {@link google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate|CreateWorkflowTemplate}, {@link google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate|InstantiateWorkflowTemplate}, - * {@link google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate|DeleteWorkflowTemplate}. - * - * The returned Operation can be used to track execution of - * workflow by polling - * {@link google.longrunning.Operations.GetOperation|operations.get}. - * The Operation will complete when entire workflow is finished. 
- * - * The running workflow can be aborted via - * {@link google.longrunning.Operations.CancelOperation|operations.cancel}. - * This will cause any inflight jobs to be cancelled and workflow-owned - * clusters to be deleted. - * - * The {@link google.longrunning.Operation.metadata|Operation.metadata} will be - * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). - * Also see [Using - * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). - * - * On successful completion, - * {@link google.longrunning.Operation.response|Operation.response} will be - * {@link google.protobuf.Empty|Empty}. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The resource name of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.workflowTemplates,instantiateinline`, the resource - * name of the region has the following format: - * `projects/{project_id}/regions/{region}` - * - * * For `projects.locations.workflowTemplates.instantiateinline`, the - * resource name of the location has the following format: - * `projects/{project_id}/locations/{location}` - * @param {google.cloud.dataproc.v1.WorkflowTemplate} request.template - * Required. The workflow template to instantiate. - * @param {string} [request.requestId] - * Optional. A tag that prevents multiple concurrent workflow - * instances with the same tag from running. This mitigates risk of - * concurrent instances started due to retries. - * - * It is recommended to always set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The tag must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {object} [options] - * Call options. 
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js - * region_tag:dataproc_v1_generated_WorkflowTemplateService_InstantiateInlineWorkflowTemplate_async - */ - instantiateInlineWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.IInstantiateInlineWorkflowTemplateRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - instantiateInlineWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.IInstantiateInlineWorkflowTemplateRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - instantiateInlineWorkflowTemplate( - request: protos.google.cloud.dataproc.v1.IInstantiateInlineWorkflowTemplateRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - instantiateInlineWorkflowTemplate( - request?: protos.google.cloud.dataproc.v1.IInstantiateInlineWorkflowTemplateRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let 
options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - this.initialize(); - return this.innerApiCalls.instantiateInlineWorkflowTemplate(request, options, callback); - } -/** - * Check the status of the long running operation returned by `instantiateInlineWorkflowTemplate()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/workflow_template_service.instantiate_inline_workflow_template.js - * region_tag:dataproc_v1_generated_WorkflowTemplateService_InstantiateInlineWorkflowTemplate_async - */ - async checkInstantiateInlineWorkflowTemplateProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.instantiateInlineWorkflowTemplate, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } - /** - * Lists workflows that match the specified filter in the request. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. 
The resource name of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.workflowTemplates,list`, the resource - * name of the region has the following format: - * `projects/{project_id}/regions/{region}` - * - * * For `projects.locations.workflowTemplates.list`, the - * resource name of the location has the following format: - * `projects/{project_id}/locations/{location}` - * @param {number} [request.pageSize] - * Optional. The maximum number of results to return in each response. - * @param {string} [request.pageToken] - * Optional. The page token, returned by a previous call, to request the - * next page of results. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is Array of [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate}. - * The client library will perform auto-pagination by default: it will call the API as many - * times as needed and will merge results from all the pages into this array. - * Note that it can affect your quota. - * We recommend using `listWorkflowTemplatesAsync()` - * method described below for async iteration which you can stop as needed. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. 
- */ - listWorkflowTemplates( - request?: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.IWorkflowTemplate[], - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest|null, - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse - ]>; - listWorkflowTemplates( - request: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, - options: CallOptions, - callback: PaginationCallback< - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IWorkflowTemplate>): void; - listWorkflowTemplates( - request: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, - callback: PaginationCallback< - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IWorkflowTemplate>): void; - listWorkflowTemplates( - request?: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, - optionsOrCallback?: CallOptions|PaginationCallback< - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IWorkflowTemplate>, - callback?: PaginationCallback< - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse|null|undefined, - protos.google.cloud.dataproc.v1.IWorkflowTemplate>): - Promise<[ - protos.google.cloud.dataproc.v1.IWorkflowTemplate[], - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest|null, - protos.google.cloud.dataproc.v1.IListWorkflowTemplatesResponse - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = 
optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - this.initialize(); - return this.innerApiCalls.listWorkflowTemplates(request, options, callback); - } - -/** - * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The resource name of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.workflowTemplates,list`, the resource - * name of the region has the following format: - * `projects/{project_id}/regions/{region}` - * - * * For `projects.locations.workflowTemplates.list`, the - * resource name of the location has the following format: - * `projects/{project_id}/locations/{location}` - * @param {number} [request.pageSize] - * Optional. The maximum number of results to return in each response. - * @param {string} [request.pageToken] - * Optional. The page token, returned by a previous call, to request the - * next page of results. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Stream} - * An object stream which emits an object representing [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate} on 'data' event. - * The client library will perform auto-pagination by default: it will call the API as many - * times as needed. Note that it can affect your quota. 
- * We recommend using `listWorkflowTemplatesAsync()` - * method described below for async iteration which you can stop as needed. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. - */ - listWorkflowTemplatesStream( - request?: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, - options?: CallOptions): - Transform{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - const defaultCallSettings = this._defaults['listWorkflowTemplates']; - const callSettings = defaultCallSettings.merge(options); - this.initialize(); - return this.descriptors.page.listWorkflowTemplates.createStream( - this.innerApiCalls.listWorkflowTemplates as GaxCall, - request, - callSettings - ); - } - -/** - * Equivalent to `listWorkflowTemplates`, but returns an iterable object. - * - * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The resource name of the region or location, as described - * in https://cloud.google.com/apis/design/resource_names. - * - * * For `projects.regions.workflowTemplates,list`, the resource - * name of the region has the following format: - * `projects/{project_id}/regions/{region}` - * - * * For `projects.locations.workflowTemplates.list`, the - * resource name of the location has the following format: - * `projects/{project_id}/locations/{location}` - * @param {number} [request.pageSize] - * Optional. The maximum number of results to return in each response. - * @param {string} [request.pageToken] - * Optional. 
The page token, returned by a previous call, to request the - * next page of results. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Object} - * An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols). - * When you iterate the returned iterable, each element will be an object representing - * [WorkflowTemplate]{@link google.cloud.dataproc.v1.WorkflowTemplate}. The API will be called under the hood as needed, once per the page, - * so you can stop the iteration when you don't need more results. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination) - * for more details and examples. - * @example include:samples/generated/v1/workflow_template_service.list_workflow_templates.js - * region_tag:dataproc_v1_generated_WorkflowTemplateService_ListWorkflowTemplates_async - */ - listWorkflowTemplatesAsync( - request?: protos.google.cloud.dataproc.v1.IListWorkflowTemplatesRequest, - options?: CallOptions): - AsyncIterable{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); - const defaultCallSettings = this._defaults['listWorkflowTemplates']; - const callSettings = defaultCallSettings.merge(options); - this.initialize(); - return this.descriptors.page.listWorkflowTemplates.asyncIterate( - this.innerApiCalls['listWorkflowTemplates'] as GaxCall, - request as {}, - callSettings - ) as AsyncIterable; - } - // -------------------- - // -- Path templates -- - // -------------------- - - /** - * Return a fully-qualified batch resource name string. 
- * - * @param {string} project - * @param {string} location - * @param {string} batch - * @returns {string} Resource name string. - */ - batchPath(project:string,location:string,batch:string) { - return this.pathTemplates.batchPathTemplate.render({ - project: project, - location: location, - batch: batch, - }); - } - - /** - * Parse the project from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the project. - */ - matchProjectFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).project; - } - - /** - * Parse the location from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the location. - */ - matchLocationFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).location; - } - - /** - * Parse the batch from Batch resource. - * - * @param {string} batchName - * A fully-qualified path representing Batch resource. - * @returns {string} A string representing the batch. - */ - matchBatchFromBatchName(batchName: string) { - return this.pathTemplates.batchPathTemplate.match(batchName).batch; - } - - /** - * Return a fully-qualified nodeGroup resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} cluster - * @param {string} node_group - * @returns {string} Resource name string. - */ - nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { - return this.pathTemplates.nodeGroupPathTemplate.render({ - project: project, - region: region, - cluster: cluster, - node_group: nodeGroup, - }); - } - - /** - * Parse the project from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the project. 
- */ - matchProjectFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; - } - - /** - * Parse the region from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the region. - */ - matchRegionFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; - } - - /** - * Parse the cluster from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the cluster. - */ - matchClusterFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; - } - - /** - * Parse the node_group from NodeGroup resource. - * - * @param {string} nodeGroupName - * A fully-qualified path representing NodeGroup resource. - * @returns {string} A string representing the node_group. - */ - matchNodeGroupFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; - } - - /** - * Return a fully-qualified project resource name string. - * - * @param {string} project - * @returns {string} Resource name string. - */ - projectPath(project:string) { - return this.pathTemplates.projectPathTemplate.render({ - project: project, - }); - } - - /** - * Parse the project from Project resource. - * - * @param {string} projectName - * A fully-qualified path representing Project resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectName(projectName: string) { - return this.pathTemplates.projectPathTemplate.match(projectName).project; - } - - /** - * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. 
- * - * @param {string} project - * @param {string} location - * @param {string} autoscaling_policy - * @returns {string} Resource name string. - */ - projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ - project: project, - location: location, - autoscaling_policy: autoscalingPolicy, - }); - } - - /** - * Parse the project from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; - } - - /** - * Parse the location from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the location. - */ - matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; - } - - /** - * Parse the autoscaling_policy from ProjectLocationAutoscalingPolicy resource. - * - * @param {string} projectLocationAutoscalingPolicyName - * A fully-qualified path representing project_location_autoscaling_policy resource. - * @returns {string} A string representing the autoscaling_policy. 
- */ - matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; - } - - /** - * Return a fully-qualified projectLocationWorkflowTemplate resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} workflow_template - * @returns {string} Resource name string. - */ - projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ - project: project, - location: location, - workflow_template: workflowTemplate, - }); - } - - /** - * Parse the project from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; - } - - /** - * Parse the location from ProjectLocationWorkflowTemplate resource. - * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the location. - */ - matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; - } - - /** - * Parse the workflow_template from ProjectLocationWorkflowTemplate resource. 
- * - * @param {string} projectLocationWorkflowTemplateName - * A fully-qualified path representing project_location_workflow_template resource. - * @returns {string} A string representing the workflow_template. - */ - matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; - } - - /** - * Return a fully-qualified projectRegionAutoscalingPolicy resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} autoscaling_policy - * @returns {string} Resource name string. - */ - projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ - project: project, - region: region, - autoscaling_policy: autoscalingPolicy, - }); - } - - /** - * Parse the project from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; - } - - /** - * Parse the region from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the region. 
- */ - matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; - } - - /** - * Parse the autoscaling_policy from ProjectRegionAutoscalingPolicy resource. - * - * @param {string} projectRegionAutoscalingPolicyName - * A fully-qualified path representing project_region_autoscaling_policy resource. - * @returns {string} A string representing the autoscaling_policy. - */ - matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; - } - - /** - * Return a fully-qualified projectRegionWorkflowTemplate resource name string. - * - * @param {string} project - * @param {string} region - * @param {string} workflow_template - * @returns {string} Resource name string. - */ - projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ - project: project, - region: region, - workflow_template: workflowTemplate, - }); - } - - /** - * Parse the project from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; - } - - /** - * Parse the region from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. 
- * @returns {string} A string representing the region. - */ - matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; - } - - /** - * Parse the workflow_template from ProjectRegionWorkflowTemplate resource. - * - * @param {string} projectRegionWorkflowTemplateName - * A fully-qualified path representing project_region_workflow_template resource. - * @returns {string} A string representing the workflow_template. - */ - matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; - } - - /** - * Return a fully-qualified region resource name string. - * - * @param {string} project - * @param {string} region - * @returns {string} Resource name string. - */ - regionPath(project:string,region:string) { - return this.pathTemplates.regionPathTemplate.render({ - project: project, - region: region, - }); - } - - /** - * Parse the project from Region resource. - * - * @param {string} regionName - * A fully-qualified path representing Region resource. - * @returns {string} A string representing the project. - */ - matchProjectFromRegionName(regionName: string) { - return this.pathTemplates.regionPathTemplate.match(regionName).project; - } - - /** - * Parse the region from Region resource. - * - * @param {string} regionName - * A fully-qualified path representing Region resource. - * @returns {string} A string representing the region. - */ - matchRegionFromRegionName(regionName: string) { - return this.pathTemplates.regionPathTemplate.match(regionName).region; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. 
- * @returns {Promise} A promise that resolves when the client is closed. - */ - close(): Promise { - if (this.workflowTemplateServiceStub && !this._terminated) { - return this.workflowTemplateServiceStub.then(stub => { - this._terminated = true; - stub.close(); - this.operationsClient.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client_config.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client_config.json deleted file mode 100644 index 62d3aa9b9a0..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_client_config.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "interfaces": { - "google.cloud.dataproc.v1.WorkflowTemplateService": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ], - "unavailable": [ - "UNAVAILABLE" - ], - "deadline_exceeded_internal_unavailable": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "CreateWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "GetWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "deadline_exceeded_internal_unavailable", - "retry_params_name": "default" - }, - "InstantiateWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "InstantiateInlineWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "UpdateWorkflowTemplate": { - "timeout_millis": 600000, - 
"retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "ListWorkflowTemplates": { - "timeout_millis": 600000, - "retry_codes_name": "deadline_exceeded_internal_unavailable", - "retry_params_name": "default" - }, - "DeleteWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - } - } - } - } -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_proto_list.json b/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_proto_list.json deleted file mode 100644 index 3bb7ccf055a..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/workflow_template_service_proto_list.json +++ /dev/null @@ -1,10 +0,0 @@ -[ - "../../protos/google/cloud/dataproc/v1/autoscaling_policies.proto", - "../../protos/google/cloud/dataproc/v1/batches.proto", - "../../protos/google/cloud/dataproc/v1/clusters.proto", - "../../protos/google/cloud/dataproc/v1/jobs.proto", - "../../protos/google/cloud/dataproc/v1/node_groups.proto", - "../../protos/google/cloud/dataproc/v1/operations.proto", - "../../protos/google/cloud/dataproc/v1/shared.proto", - "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" -] diff --git a/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.js deleted file mode 100644 index 0b94f82406b..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.js +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -/* eslint-disable node/no-missing-require, no-unused-vars */ -const dataproc = require('@google-cloud/dataproc'); - -function main() { - const autoscalingPolicyServiceClient = new dataproc.AutoscalingPolicyServiceClient(); - const batchControllerClient = new dataproc.BatchControllerClient(); - const clusterControllerClient = new dataproc.ClusterControllerClient(); - const jobControllerClient = new dataproc.JobControllerClient(); - const nodeGroupControllerClient = new dataproc.NodeGroupControllerClient(); - const workflowTemplateServiceClient = new dataproc.WorkflowTemplateServiceClient(); -} - -main(); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.ts deleted file mode 100644 index fcc5e31dcea..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/system-test/fixtures/sample/src/index.ts +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {AutoscalingPolicyServiceClient, BatchControllerClient, ClusterControllerClient, JobControllerClient, NodeGroupControllerClient, WorkflowTemplateServiceClient} from '@google-cloud/dataproc'; - -// check that the client class type name can be used -function doStuffWithAutoscalingPolicyServiceClient(client: AutoscalingPolicyServiceClient) { - client.close(); -} -function doStuffWithBatchControllerClient(client: BatchControllerClient) { - client.close(); -} -function doStuffWithClusterControllerClient(client: ClusterControllerClient) { - client.close(); -} -function doStuffWithJobControllerClient(client: JobControllerClient) { - client.close(); -} -function doStuffWithNodeGroupControllerClient(client: NodeGroupControllerClient) { - client.close(); -} -function doStuffWithWorkflowTemplateServiceClient(client: WorkflowTemplateServiceClient) { - client.close(); -} - -function main() { - // check that the client instance can be created - const autoscalingPolicyServiceClient = new AutoscalingPolicyServiceClient(); - doStuffWithAutoscalingPolicyServiceClient(autoscalingPolicyServiceClient); - // check that the client instance can be created - const batchControllerClient = new BatchControllerClient(); - doStuffWithBatchControllerClient(batchControllerClient); - // check that the client instance can be created - const clusterControllerClient = new 
ClusterControllerClient(); - doStuffWithClusterControllerClient(clusterControllerClient); - // check that the client instance can be created - const jobControllerClient = new JobControllerClient(); - doStuffWithJobControllerClient(jobControllerClient); - // check that the client instance can be created - const nodeGroupControllerClient = new NodeGroupControllerClient(); - doStuffWithNodeGroupControllerClient(nodeGroupControllerClient); - // check that the client instance can be created - const workflowTemplateServiceClient = new WorkflowTemplateServiceClient(); - doStuffWithWorkflowTemplateServiceClient(workflowTemplateServiceClient); -} - -main(); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/system-test/install.ts b/owl-bot-staging/google-cloud-dataproc/v1/system-test/install.ts deleted file mode 100644 index 557a57558e1..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/system-test/install.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -import {packNTest} from 'pack-n-play'; -import {readFileSync} from 'fs'; -import {describe, it} from 'mocha'; - -describe('📦 pack-n-play test', () => { - - it('TypeScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'TypeScript user can use the type definitions', - ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() - } - }; - await packNTest(options); - }); - - it('JavaScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'JavaScript user can use the library', - ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() - } - }; - await packNTest(options); - }); - -}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_autoscaling_policy_service_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_autoscaling_policy_service_v1.ts deleted file mode 100644 index ff961ac6ff2..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_autoscaling_policy_service_v1.ts +++ /dev/null @@ -1,1233 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import {describe, it} from 'mocha'; -import * as autoscalingpolicyserviceModule from '../src'; - -import {PassThrough} from 'stream'; - -import {protobuf} from 'google-gax'; - -// Dynamically loaded proto JSON is needed to get the type information -// to fill in default values for request objects -const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function getTypeDefaultValue(typeName: string, fields: string[]) { - let type = root.lookupType(typeName) as protobuf.Type; - for (const field of fields.slice(0, -1)) { - type = type.fields[field]?.resolvedType as protobuf.Type; - } - return type.fields[fields[fields.length - 1]]?.defaultValue; -} - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { - return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); -} - -function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { - const pagingStub = sinon.stub(); - if (responses) { - for (let i = 0; i < responses.length; ++i) { - pagingStub.onCall(i).callsArgWith(2, null, responses[i]); - } - } - const transformStub = error ? 
sinon.stub().callsArgWith(2, error) : pagingStub; - const mockStream = new PassThrough({ - objectMode: true, - transform: transformStub, - }); - // trigger as many responses as needed - if (responses) { - for (let i = 0; i < responses.length; ++i) { - setImmediate(() => { mockStream.write({}); }); - } - setImmediate(() => { mockStream.end(); }); - } else { - setImmediate(() => { mockStream.write({}); }); - setImmediate(() => { mockStream.end(); }); - } - return sinon.stub().returns(mockStream); -} - -function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { - let counter = 0; - const asyncIterable = { - [Symbol.asyncIterator]() { - return { - async next() { - if (error) { - return Promise.reject(error); - } - if (counter >= responses!.length) { - return Promise.resolve({done: true, value: undefined}); - } - return Promise.resolve({done: false, value: responses![counter++]}); - } - }; - } - }; - return sinon.stub().returns(asyncIterable); -} - -describe('v1.AutoscalingPolicyServiceClient', () => { - describe('Common methods', () => { - it('has servicePath', () => { - const servicePath = autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', 
async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.autoscalingPolicyServiceStub, undefined); - await client.initialize(); - assert(client.autoscalingPolicyServiceStub); - }); - - it('has close method for the initialized client', done => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.autoscalingPolicyServiceStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.autoscalingPolicyServiceStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - 
client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - }); - - describe('createAutoscalingPolicy', () => { - it('invokes createAutoscalingPolicy without error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.AutoscalingPolicy() - ); - client.innerApiCalls.createAutoscalingPolicy = stubSimpleCall(expectedResponse); - const [response] = await client.createAutoscalingPolicy(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createAutoscalingPolicy without error using callback', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new 
protos.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.AutoscalingPolicy() - ); - client.innerApiCalls.createAutoscalingPolicy = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.createAutoscalingPolicy( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IAutoscalingPolicy|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createAutoscalingPolicy with error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.createAutoscalingPolicy = stubSimpleCall(undefined, expectedError); - await 
assert.rejects(client.createAutoscalingPolicy(request), expectedError); - const actualRequest = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createAutoscalingPolicy with closed client', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest', ['parent']); - request.parent = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.createAutoscalingPolicy(request), expectedError); - }); - }); - - describe('updateAutoscalingPolicy', () => { - it('invokes updateAutoscalingPolicy without error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest() - ); - request.policy ??= {}; - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest', ['policy', 'name']); - request.policy.name = defaultValue1; - const expectedHeaderRequestParams = `policy.name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new 
protos.google.cloud.dataproc.v1.AutoscalingPolicy() - ); - client.innerApiCalls.updateAutoscalingPolicy = stubSimpleCall(expectedResponse); - const [response] = await client.updateAutoscalingPolicy(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateAutoscalingPolicy without error using callback', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest() - ); - request.policy ??= {}; - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest', ['policy', 'name']); - request.policy.name = defaultValue1; - const expectedHeaderRequestParams = `policy.name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.AutoscalingPolicy() - ); - client.innerApiCalls.updateAutoscalingPolicy = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.updateAutoscalingPolicy( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IAutoscalingPolicy|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - 
assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateAutoscalingPolicy with error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest() - ); - request.policy ??= {}; - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest', ['policy', 'name']); - request.policy.name = defaultValue1; - const expectedHeaderRequestParams = `policy.name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.updateAutoscalingPolicy = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.updateAutoscalingPolicy(request), expectedError); - const actualRequest = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateAutoscalingPolicy with closed client', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest() - ); - request.policy ??= {}; - const defaultValue1 = - 
getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest', ['policy', 'name']); - request.policy.name = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.updateAutoscalingPolicy(request), expectedError); - }); - }); - - describe('getAutoscalingPolicy', () => { - it('invokes getAutoscalingPolicy without error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.AutoscalingPolicy() - ); - client.innerApiCalls.getAutoscalingPolicy = stubSimpleCall(expectedResponse); - const [response] = await client.getAutoscalingPolicy(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getAutoscalingPolicy without error using callback', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new 
protos.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.AutoscalingPolicy() - ); - client.innerApiCalls.getAutoscalingPolicy = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.getAutoscalingPolicy( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IAutoscalingPolicy|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getAutoscalingPolicy with error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.getAutoscalingPolicy = stubSimpleCall(undefined, expectedError); - await 
assert.rejects(client.getAutoscalingPolicy(request), expectedError); - const actualRequest = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getAutoscalingPolicy with closed client', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetAutoscalingPolicyRequest', ['name']); - request.name = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.getAutoscalingPolicy(request), expectedError); - }); - }); - - describe('deleteAutoscalingPolicy', () => { - it('invokes deleteAutoscalingPolicy without error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.innerApiCalls.deleteAutoscalingPolicy = stubSimpleCall(expectedResponse); - 
const [response] = await client.deleteAutoscalingPolicy(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteAutoscalingPolicy without error using callback', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.innerApiCalls.deleteAutoscalingPolicy = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.deleteAutoscalingPolicy( - request, - (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteAutoscalingPolicy with error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.deleteAutoscalingPolicy = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.deleteAutoscalingPolicy(request), expectedError); - const actualRequest = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteAutoscalingPolicy as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteAutoscalingPolicy with closed client', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest', ['name']); - request.name = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.deleteAutoscalingPolicy(request), 
expectedError); - }); - }); - - describe('listAutoscalingPolicies', () => { - it('invokes listAutoscalingPolicies without error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - ]; - client.innerApiCalls.listAutoscalingPolicies = stubSimpleCall(expectedResponse); - const [response] = await client.listAutoscalingPolicies(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listAutoscalingPolicies without error using callback', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() - ); - const defaultValue1 = - 
getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - ]; - client.innerApiCalls.listAutoscalingPolicies = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.listAutoscalingPolicies( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IAutoscalingPolicy[]|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listAutoscalingPolicies with error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - 
client.innerApiCalls.listAutoscalingPolicies = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.listAutoscalingPolicies(request), expectedError); - const actualRequest = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listAutoscalingPolicies as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listAutoscalingPoliciesStream without error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - ]; - client.descriptors.page.listAutoscalingPolicies.createStream = stubPageStreamingCall(expectedResponse); - const stream = client.listAutoscalingPoliciesStream(request); - const promise = new Promise((resolve, reject) => { - const responses: protos.google.cloud.dataproc.v1.AutoscalingPolicy[] = []; - stream.on('data', (response: protos.google.cloud.dataproc.v1.AutoscalingPolicy) => { - responses.push(response); - }); - stream.on('end', () => { - resolve(responses); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - 
}); - const responses = await promise; - assert.deepStrictEqual(responses, expectedResponse); - assert((client.descriptors.page.listAutoscalingPolicies.createStream as SinonStub) - .getCall(0).calledWith(client.innerApiCalls.listAutoscalingPolicies, request)); - assert( - (client.descriptors.page.listAutoscalingPolicies.createStream as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('invokes listAutoscalingPoliciesStream with error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.descriptors.page.listAutoscalingPolicies.createStream = stubPageStreamingCall(undefined, expectedError); - const stream = client.listAutoscalingPoliciesStream(request); - const promise = new Promise((resolve, reject) => { - const responses: protos.google.cloud.dataproc.v1.AutoscalingPolicy[] = []; - stream.on('data', (response: protos.google.cloud.dataproc.v1.AutoscalingPolicy) => { - responses.push(response); - }); - stream.on('end', () => { - resolve(responses); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - await assert.rejects(promise, expectedError); - assert((client.descriptors.page.listAutoscalingPolicies.createStream as SinonStub) - .getCall(0).calledWith(client.innerApiCalls.listAutoscalingPolicies, request)); - assert( - (client.descriptors.page.listAutoscalingPolicies.createStream as SinonStub) - 
.getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('uses async iteration with listAutoscalingPolicies without error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.AutoscalingPolicy()), - ]; - client.descriptors.page.listAutoscalingPolicies.asyncIterate = stubAsyncIterationCall(expectedResponse); - const responses: protos.google.cloud.dataproc.v1.IAutoscalingPolicy[] = []; - const iterable = client.listAutoscalingPoliciesAsync(request); - for await (const resource of iterable) { - responses.push(resource!); - } - assert.deepStrictEqual(responses, expectedResponse); - assert.deepStrictEqual( - (client.descriptors.page.listAutoscalingPolicies.asyncIterate as SinonStub) - .getCall(0).args[1], request); - assert( - (client.descriptors.page.listAutoscalingPolicies.asyncIterate as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('uses async iteration with listAutoscalingPolicies with error', async () => { - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 
'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.descriptors.page.listAutoscalingPolicies.asyncIterate = stubAsyncIterationCall(undefined, expectedError); - const iterable = client.listAutoscalingPoliciesAsync(request); - await assert.rejects(async () => { - const responses: protos.google.cloud.dataproc.v1.IAutoscalingPolicy[] = []; - for await (const resource of iterable) { - responses.push(resource!); - } - }); - assert.deepStrictEqual( - (client.descriptors.page.listAutoscalingPolicies.asyncIterate as SinonStub) - .getCall(0).args[1], request); - assert( - (client.descriptors.page.listAutoscalingPolicies.asyncIterate as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - }); - - describe('Path templates', () => { - - describe('batch', () => { - const fakePath = "/rendered/path/batch"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - batch: "batchValue", - }; - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.batchPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.batchPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('batchPath', () => { - const result = client.batchPath("projectValue", "locationValue", "batchValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.batchPathTemplate.render as SinonStub) - 
.getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromBatchName', () => { - const result = client.matchProjectFromBatchName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromBatchName', () => { - const result = client.matchLocationFromBatchName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchBatchFromBatchName', () => { - const result = client.matchBatchFromBatchName(fakePath); - assert.strictEqual(result, "batchValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('location', () => { - const fakePath = "/rendered/path/location"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - }; - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.locationPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.locationPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('locationPath', () => { - const result = client.locationPath("projectValue", "locationValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.locationPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromLocationName', () => { - const result = client.matchProjectFromLocationName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.locationPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromLocationName', () => { - const 
result = client.matchLocationFromLocationName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.locationPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('nodeGroup', () => { - const fakePath = "/rendered/path/nodeGroup"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - cluster: "clusterValue", - node_group: "nodeGroupValue", - }; - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.nodeGroupPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.nodeGroupPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('nodeGroupPath', () => { - const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromNodeGroupName', () => { - const result = client.matchProjectFromNodeGroupName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromNodeGroupName', () => { - const result = client.matchRegionFromNodeGroupName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchClusterFromNodeGroupName', () => { - const result = client.matchClusterFromNodeGroupName(fakePath); - assert.strictEqual(result, "clusterValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - 
it('matchNodeGroupFromNodeGroupName', () => { - const result = client.matchNodeGroupFromNodeGroupName(fakePath); - assert.strictEqual(result, "nodeGroupValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('project', () => { - const fakePath = "/rendered/path/project"; - const expectedParameters = { - project: "projectValue", - }; - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectPath', () => { - const result = client.projectPath("projectValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectName', () => { - const result = client.matchProjectFromProjectName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = - 
sinon.stub().returns(expectedParameters); - - it('projectLocationAutoscalingPolicyPath', () => { - const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - workflow_template: "workflowTemplateValue", - }; - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - 
client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationWorkflowTemplatePath', () => { - const result = client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ 
- credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionAutoscalingPolicyPath', () => { - const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - workflow_template: "workflowTemplateValue", 
- }; - const client = new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionWorkflowTemplatePath', () => { - const result = client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - }); -}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_batch_controller_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_batch_controller_v1.ts 
deleted file mode 100644 index f30e15246ba..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_batch_controller_v1.ts +++ /dev/null @@ -1,1183 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import {describe, it} from 'mocha'; -import * as batchcontrollerModule from '../src'; - -import {PassThrough} from 'stream'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -// Dynamically loaded proto JSON is needed to get the type information -// to fill in default values for request objects -const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function getTypeDefaultValue(typeName: string, fields: string[]) { - let type = root.lookupType(typeName) as protobuf.Type; - for (const field of fields.slice(0, -1)) { - type = type.fields[field]?.resolvedType as protobuf.Type; - } - return type.fields[fields[fields.length - 1]]?.defaultValue; -} - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof 
protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { - return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { - const pagingStub = sinon.stub(); - if (responses) { - for (let i = 0; i < responses.length; ++i) { - pagingStub.onCall(i).callsArgWith(2, null, responses[i]); - } - } - const transformStub = error ? 
sinon.stub().callsArgWith(2, error) : pagingStub; - const mockStream = new PassThrough({ - objectMode: true, - transform: transformStub, - }); - // trigger as many responses as needed - if (responses) { - for (let i = 0; i < responses.length; ++i) { - setImmediate(() => { mockStream.write({}); }); - } - setImmediate(() => { mockStream.end(); }); - } else { - setImmediate(() => { mockStream.write({}); }); - setImmediate(() => { mockStream.end(); }); - } - return sinon.stub().returns(mockStream); -} - -function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { - let counter = 0; - const asyncIterable = { - [Symbol.asyncIterator]() { - return { - async next() { - if (error) { - return Promise.reject(error); - } - if (counter >= responses!.length) { - return Promise.resolve({done: true, value: undefined}); - } - return Promise.resolve({done: false, value: responses![counter++]}); - } - }; - } - }; - return sinon.stub().returns(asyncIterable); -} - -describe('v1.BatchControllerClient', () => { - describe('Common methods', () => { - it('has servicePath', () => { - const servicePath = batchcontrollerModule.v1.BatchControllerClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = batchcontrollerModule.v1.BatchControllerClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = batchcontrollerModule.v1.BatchControllerClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new batchcontrollerModule.v1.BatchControllerClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: 
{client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.batchControllerStub, undefined); - await client.initialize(); - assert(client.batchControllerStub); - }); - - it('has close method for the initialized client', done => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.batchControllerStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.batchControllerStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - 
}); - }); - - describe('getBatch', () => { - it('invokes getBatch without error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetBatchRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Batch() - ); - client.innerApiCalls.getBatch = stubSimpleCall(expectedResponse); - const [response] = await client.getBatch(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getBatch as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getBatch as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getBatch without error using callback', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetBatchRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Batch() - ); - client.innerApiCalls.getBatch = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, 
reject) => { - client.getBatch( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IBatch|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getBatch as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getBatch as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getBatch with error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetBatchRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.getBatch = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.getBatch(request), expectedError); - const actualRequest = (client.innerApiCalls.getBatch as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getBatch as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getBatch with closed client', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const 
request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetBatchRequest', ['name']); - request.name = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.getBatch(request), expectedError); - }); - }); - - describe('deleteBatch', () => { - it('invokes deleteBatch without error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteBatchRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.innerApiCalls.deleteBatch = stubSimpleCall(expectedResponse); - const [response] = await client.deleteBatch(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.deleteBatch as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteBatch as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteBatch without error using callback', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteBatchRequest() - ); - const 
defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteBatchRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.innerApiCalls.deleteBatch = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.deleteBatch( - request, - (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.deleteBatch as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteBatch as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteBatch with error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteBatchRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.deleteBatch = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.deleteBatch(request), expectedError); - const actualRequest = (client.innerApiCalls.deleteBatch as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = 
(client.innerApiCalls.deleteBatch as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteBatch with closed client', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteBatchRequest', ['name']); - request.name = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.deleteBatch(request), expectedError); - }); - }); - - describe('createBatch', () => { - it('invokes createBatch without error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateBatchRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.createBatch = stubLongRunningCall(expectedResponse); - const [operation] = await client.createBatch(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.createBatch as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createBatch as 
SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createBatch without error using callback', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateBatchRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.createBatch = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.createBatch( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.createBatch as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createBatch as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createBatch with call error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new 
protos.google.cloud.dataproc.v1.CreateBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateBatchRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.createBatch = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.createBatch(request), expectedError); - const actualRequest = (client.innerApiCalls.createBatch as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createBatch as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createBatch with LRO error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateBatchRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateBatchRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.createBatch = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.createBatch(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.createBatch as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createBatch as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkCreateBatchProgress without error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkCreateBatchProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkCreateBatchProgress with error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkCreateBatchProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('listBatches', () => { - it('invokes listBatches without error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListBatchesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', 
['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - ]; - client.innerApiCalls.listBatches = stubSimpleCall(expectedResponse); - const [response] = await client.listBatches(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.listBatches as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listBatches as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listBatches without error using callback', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListBatchesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - ]; - client.innerApiCalls.listBatches = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.listBatches( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IBatch[]|null) => { - if (err) { - reject(err); - 
} else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.listBatches as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listBatches as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listBatches with error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListBatchesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.listBatches = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.listBatches(request), expectedError); - const actualRequest = (client.innerApiCalls.listBatches as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listBatches as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listBatchesStream without error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListBatchesRequest() - ); - const 
defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - ]; - client.descriptors.page.listBatches.createStream = stubPageStreamingCall(expectedResponse); - const stream = client.listBatchesStream(request); - const promise = new Promise((resolve, reject) => { - const responses: protos.google.cloud.dataproc.v1.Batch[] = []; - stream.on('data', (response: protos.google.cloud.dataproc.v1.Batch) => { - responses.push(response); - }); - stream.on('end', () => { - resolve(responses); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - const responses = await promise; - assert.deepStrictEqual(responses, expectedResponse); - assert((client.descriptors.page.listBatches.createStream as SinonStub) - .getCall(0).calledWith(client.innerApiCalls.listBatches, request)); - assert( - (client.descriptors.page.listBatches.createStream as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('invokes listBatchesStream with error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListBatchesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - 
client.descriptors.page.listBatches.createStream = stubPageStreamingCall(undefined, expectedError); - const stream = client.listBatchesStream(request); - const promise = new Promise((resolve, reject) => { - const responses: protos.google.cloud.dataproc.v1.Batch[] = []; - stream.on('data', (response: protos.google.cloud.dataproc.v1.Batch) => { - responses.push(response); - }); - stream.on('end', () => { - resolve(responses); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - await assert.rejects(promise, expectedError); - assert((client.descriptors.page.listBatches.createStream as SinonStub) - .getCall(0).calledWith(client.innerApiCalls.listBatches, request)); - assert( - (client.descriptors.page.listBatches.createStream as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('uses async iteration with listBatches without error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListBatchesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Batch()), - ]; - client.descriptors.page.listBatches.asyncIterate = stubAsyncIterationCall(expectedResponse); - const responses: protos.google.cloud.dataproc.v1.IBatch[] = []; - const iterable = client.listBatchesAsync(request); - for await (const resource of iterable) { - responses.push(resource!); - } - 
assert.deepStrictEqual(responses, expectedResponse); - assert.deepStrictEqual( - (client.descriptors.page.listBatches.asyncIterate as SinonStub) - .getCall(0).args[1], request); - assert( - (client.descriptors.page.listBatches.asyncIterate as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('uses async iteration with listBatches with error', async () => { - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListBatchesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListBatchesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.descriptors.page.listBatches.asyncIterate = stubAsyncIterationCall(undefined, expectedError); - const iterable = client.listBatchesAsync(request); - await assert.rejects(async () => { - const responses: protos.google.cloud.dataproc.v1.IBatch[] = []; - for await (const resource of iterable) { - responses.push(resource!); - } - }); - assert.deepStrictEqual( - (client.descriptors.page.listBatches.asyncIterate as SinonStub) - .getCall(0).args[1], request); - assert( - (client.descriptors.page.listBatches.asyncIterate as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - }); - - describe('Path templates', () => { - - describe('batch', () => { - const fakePath = "/rendered/path/batch"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - batch: "batchValue", - }; - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', 
private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.batchPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.batchPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('batchPath', () => { - const result = client.batchPath("projectValue", "locationValue", "batchValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.batchPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromBatchName', () => { - const result = client.matchProjectFromBatchName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromBatchName', () => { - const result = client.matchLocationFromBatchName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchBatchFromBatchName', () => { - const result = client.matchBatchFromBatchName(fakePath); - assert.strictEqual(result, "batchValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('location', () => { - const fakePath = "/rendered/path/location"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - }; - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.locationPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.locationPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('locationPath', () => { - const result = client.locationPath("projectValue", "locationValue"); - assert.strictEqual(result, fakePath); 
- assert((client.pathTemplates.locationPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromLocationName', () => { - const result = client.matchProjectFromLocationName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.locationPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromLocationName', () => { - const result = client.matchLocationFromLocationName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.locationPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('nodeGroup', () => { - const fakePath = "/rendered/path/nodeGroup"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - cluster: "clusterValue", - node_group: "nodeGroupValue", - }; - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.nodeGroupPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.nodeGroupPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('nodeGroupPath', () => { - const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromNodeGroupName', () => { - const result = client.matchProjectFromNodeGroupName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromNodeGroupName', () => { - const result = client.matchRegionFromNodeGroupName(fakePath); - assert.strictEqual(result, 
"regionValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchClusterFromNodeGroupName', () => { - const result = client.matchClusterFromNodeGroupName(fakePath); - assert.strictEqual(result, "clusterValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchNodeGroupFromNodeGroupName', () => { - const result = client.matchNodeGroupFromNodeGroupName(fakePath); - assert.strictEqual(result, "nodeGroupValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('project', () => { - const fakePath = "/rendered/path/project"; - const expectedParameters = { - project: "projectValue", - }; - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectPath', () => { - const result = client.projectPath("projectValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectName', () => { - const result = client.matchProjectFromProjectName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - 
const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationAutoscalingPolicyPath', () => { - const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; - const 
expectedParameters = { - project: "projectValue", - location: "locationValue", - workflow_template: "workflowTemplateValue", - }; - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationWorkflowTemplatePath', () => { - const result = client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - 
describe('projectRegionAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionAutoscalingPolicyPath', () => { - const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - 
assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - workflow_template: "workflowTemplateValue", - }; - const client = new batchcontrollerModule.v1.BatchControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionWorkflowTemplatePath', () => { - const result = client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); 
- assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - }); -}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_cluster_controller_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_cluster_controller_v1.ts deleted file mode 100644 index 01f54ef9787..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_cluster_controller_v1.ts +++ /dev/null @@ -1,2000 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import {describe, it} from 'mocha'; -import * as clustercontrollerModule from '../src'; - -import {PassThrough} from 'stream'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -// Dynamically loaded proto JSON is needed to get the type information -// to fill in default values for request objects -const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function getTypeDefaultValue(typeName: string, fields: string[]) { - let type = root.lookupType(typeName) as protobuf.Type; - for (const field of fields.slice(0, -1)) { - type = type.fields[field]?.resolvedType as protobuf.Type; - } - return type.fields[fields[fields.length - 1]]?.defaultValue; -} - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { - return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? 
sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { - const pagingStub = sinon.stub(); - if (responses) { - for (let i = 0; i < responses.length; ++i) { - pagingStub.onCall(i).callsArgWith(2, null, responses[i]); - } - } - const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub; - const mockStream = new PassThrough({ - objectMode: true, - transform: transformStub, - }); - // trigger as many responses as needed - if (responses) { - for (let i = 0; i < responses.length; ++i) { - setImmediate(() => { mockStream.write({}); }); - } - setImmediate(() => { mockStream.end(); }); - } else { - setImmediate(() => { mockStream.write({}); }); - setImmediate(() => { mockStream.end(); }); - } - return sinon.stub().returns(mockStream); -} - -function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { - let counter = 0; - const asyncIterable = { - [Symbol.asyncIterator]() { - return { - async next() { - if (error) { - return Promise.reject(error); - } - if (counter >= responses!.length) { - return Promise.resolve({done: true, value: undefined}); - } - return Promise.resolve({done: false, value: responses![counter++]}); - } - }; - } - }; - return sinon.stub().returns(asyncIterable); -} - -describe('v1.ClusterControllerClient', () => { - describe('Common methods', () => { - it('has servicePath', () => { - const servicePath = clustercontrollerModule.v1.ClusterControllerClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = 
clustercontrollerModule.v1.ClusterControllerClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = clustercontrollerModule.v1.ClusterControllerClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.clusterControllerStub, undefined); - await client.initialize(); - assert(client.clusterControllerStub); - }); - - it('has close method for the initialized client', done => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.clusterControllerStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.clusterControllerStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result 
= await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - }); - - describe('getCluster', () => { - it('invokes getCluster without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Cluster() - ); - client.innerApiCalls.getCluster = stubSimpleCall(expectedResponse); - const [response] = await client.getCluster(request); - assert.deepStrictEqual(response, 
expectedResponse); - const actualRequest = (client.innerApiCalls.getCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getCluster without error using callback', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Cluster() - ); - client.innerApiCalls.getCluster = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.getCluster( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.ICluster|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const 
actualHeaderRequestParams = (client.innerApiCalls.getCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getCluster with error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.getCluster = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.getCluster(request), expectedError); - const actualRequest = (client.innerApiCalls.getCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getCluster with closed client', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new 
protos.google.cloud.dataproc.v1.GetClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.getCluster(request), expectedError); - }); - }); - - describe('createCluster', () => { - it('invokes createCluster without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.createCluster = stubLongRunningCall(expectedResponse); - const [operation] = await client.createCluster(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.createCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createCluster as 
SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createCluster without error using callback', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.createCluster = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.createCluster( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.createCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createCluster with call error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - 
credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError = new Error('expected'); - client.innerApiCalls.createCluster = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.createCluster(request), expectedError); - const actualRequest = (client.innerApiCalls.createCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createCluster with LRO error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateClusterRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError = new Error('expected'); - 
client.innerApiCalls.createCluster = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.createCluster(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.createCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkCreateClusterProgress without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkCreateClusterProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkCreateClusterProgress with error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkCreateClusterProgress(''), expectedError); - 
assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('updateCluster', () => { - it('invokes updateCluster without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.updateCluster = stubLongRunningCall(expectedResponse); - const [operation] = await client.updateCluster(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.updateCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateCluster without error using callback', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); 
- client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.updateCluster = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.updateCluster( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.updateCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateCluster with call error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateClusterRequest() - ); - const 
defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.updateCluster = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.updateCluster(request), expectedError); - const actualRequest = (client.innerApiCalls.updateCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateCluster with LRO error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = 
`project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.updateCluster = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.updateCluster(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.updateCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkUpdateClusterProgress without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkUpdateClusterProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkUpdateClusterProgress with error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = 
stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkUpdateClusterProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('stopCluster', () => { - it('invokes stopCluster without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.StopClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.stopCluster = stubLongRunningCall(expectedResponse); - const [operation] = await client.stopCluster(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.stopCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.stopCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes stopCluster without error using callback', async () => { - const client = new 
clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.StopClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.stopCluster = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.stopCluster( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.stopCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.stopCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes stopCluster with call error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - 
client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.StopClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.stopCluster = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.stopCluster(request), expectedError); - const actualRequest = (client.innerApiCalls.stopCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.stopCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes stopCluster with LRO error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.StopClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StopClusterRequest', 
['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.stopCluster = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.stopCluster(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.stopCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.stopCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkStopClusterProgress without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkStopClusterProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkStopClusterProgress with error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new 
Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkStopClusterProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('startCluster', () => { - it('invokes startCluster without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.StartClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.startCluster = stubLongRunningCall(expectedResponse); - const [operation] = await client.startCluster(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.startCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.startCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes startCluster without error using callback', async 
() => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.StartClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.startCluster = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.startCluster( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.startCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.startCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes startCluster with call error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - 
projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.StartClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.startCluster = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.startCluster(request), expectedError); - const actualRequest = (client.innerApiCalls.startCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.startCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes startCluster with LRO error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.StartClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - 
getTypeDefaultValue('.google.cloud.dataproc.v1.StartClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.startCluster = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.startCluster(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.startCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.startCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkStartClusterProgress without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkStartClusterProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkStartClusterProgress with error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - 
projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkStartClusterProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('deleteCluster', () => { - it('invokes deleteCluster without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.deleteCluster = stubLongRunningCall(expectedResponse); - const [operation] = await client.deleteCluster(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.deleteCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteCluster without error using callback', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.deleteCluster = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.deleteCluster( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.deleteCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteCluster with call 
error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.deleteCluster = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.deleteCluster(request), expectedError); - const actualRequest = (client.innerApiCalls.deleteCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteCluster with LRO error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - 
getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.deleteCluster = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.deleteCluster(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.deleteCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkDeleteClusterProgress without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkDeleteClusterProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkDeleteClusterProgress with error', async 
() => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkDeleteClusterProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('diagnoseCluster', () => { - it('invokes diagnoseCluster without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DiagnoseClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.diagnoseCluster = stubLongRunningCall(expectedResponse); - const [operation] = await client.diagnoseCluster(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.diagnoseCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = 
(client.innerApiCalls.diagnoseCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes diagnoseCluster without error using callback', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DiagnoseClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.diagnoseCluster = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.diagnoseCluster( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.diagnoseCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.diagnoseCluster as SinonStub) - 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes diagnoseCluster with call error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DiagnoseClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.diagnoseCluster = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.diagnoseCluster(request), expectedError); - const actualRequest = (client.innerApiCalls.diagnoseCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.diagnoseCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes diagnoseCluster with LRO error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new 
protos.google.cloud.dataproc.v1.DiagnoseClusterRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DiagnoseClusterRequest', ['clusterName']); - request.clusterName = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&cluster_name=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.diagnoseCluster = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.diagnoseCluster(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.diagnoseCluster as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.diagnoseCluster as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkDiagnoseClusterProgress without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await 
client.checkDiagnoseClusterProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkDiagnoseClusterProgress with error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkDiagnoseClusterProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('listClusters', () => { - it('invokes listClusters without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListClustersRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`;const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - ]; - client.innerApiCalls.listClusters = stubSimpleCall(expectedResponse); - const [response] = await client.listClusters(request); - assert.deepStrictEqual(response, 
expectedResponse); - const actualRequest = (client.innerApiCalls.listClusters as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listClusters as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listClusters without error using callback', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListClustersRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`;const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - ]; - client.innerApiCalls.listClusters = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.listClusters( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.ICluster[]|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.listClusters as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = 
(client.innerApiCalls.listClusters as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listClusters with error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListClustersRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError = new Error('expected'); - client.innerApiCalls.listClusters = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.listClusters(request), expectedError); - const actualRequest = (client.innerApiCalls.listClusters as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listClusters as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listClustersStream without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListClustersRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); - request.projectId = 
defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - ]; - client.descriptors.page.listClusters.createStream = stubPageStreamingCall(expectedResponse); - const stream = client.listClustersStream(request); - const promise = new Promise((resolve, reject) => { - const responses: protos.google.cloud.dataproc.v1.Cluster[] = []; - stream.on('data', (response: protos.google.cloud.dataproc.v1.Cluster) => { - responses.push(response); - }); - stream.on('end', () => { - resolve(responses); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - const responses = await promise; - assert.deepStrictEqual(responses, expectedResponse); - assert((client.descriptors.page.listClusters.createStream as SinonStub) - .getCall(0).calledWith(client.innerApiCalls.listClusters, request)); - assert( - (client.descriptors.page.listClusters.createStream as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('invokes listClustersStream with error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListClustersRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - 
getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError = new Error('expected'); - client.descriptors.page.listClusters.createStream = stubPageStreamingCall(undefined, expectedError); - const stream = client.listClustersStream(request); - const promise = new Promise((resolve, reject) => { - const responses: protos.google.cloud.dataproc.v1.Cluster[] = []; - stream.on('data', (response: protos.google.cloud.dataproc.v1.Cluster) => { - responses.push(response); - }); - stream.on('end', () => { - resolve(responses); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - await assert.rejects(promise, expectedError); - assert((client.descriptors.page.listClusters.createStream as SinonStub) - .getCall(0).calledWith(client.innerApiCalls.listClusters, request)); - assert( - (client.descriptors.page.listClusters.createStream as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('uses async iteration with listClusters without error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListClustersRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedResponse = [ - generateSampleMessage(new 
protos.google.cloud.dataproc.v1.Cluster()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Cluster()), - ]; - client.descriptors.page.listClusters.asyncIterate = stubAsyncIterationCall(expectedResponse); - const responses: protos.google.cloud.dataproc.v1.ICluster[] = []; - const iterable = client.listClustersAsync(request); - for await (const resource of iterable) { - responses.push(resource!); - } - assert.deepStrictEqual(responses, expectedResponse); - assert.deepStrictEqual( - (client.descriptors.page.listClusters.asyncIterate as SinonStub) - .getCall(0).args[1], request); - assert( - (client.descriptors.page.listClusters.asyncIterate as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('uses async iteration with listClusters with error', async () => { - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListClustersRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListClustersRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError = new Error('expected'); - client.descriptors.page.listClusters.asyncIterate = stubAsyncIterationCall(undefined, expectedError); - const iterable = client.listClustersAsync(request); - await assert.rejects(async () => { - const responses: protos.google.cloud.dataproc.v1.ICluster[] = []; - for await (const resource of iterable) { - responses.push(resource!); - } - 
}); - assert.deepStrictEqual( - (client.descriptors.page.listClusters.asyncIterate as SinonStub) - .getCall(0).args[1], request); - assert( - (client.descriptors.page.listClusters.asyncIterate as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - }); - - describe('Path templates', () => { - - describe('batch', () => { - const fakePath = "/rendered/path/batch"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - batch: "batchValue", - }; - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.batchPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.batchPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('batchPath', () => { - const result = client.batchPath("projectValue", "locationValue", "batchValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.batchPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromBatchName', () => { - const result = client.matchProjectFromBatchName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromBatchName', () => { - const result = client.matchLocationFromBatchName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchBatchFromBatchName', () => { - const result = client.matchBatchFromBatchName(fakePath); - assert.strictEqual(result, "batchValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - 
describe('nodeGroup', () => { - const fakePath = "/rendered/path/nodeGroup"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - cluster: "clusterValue", - node_group: "nodeGroupValue", - }; - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.nodeGroupPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.nodeGroupPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('nodeGroupPath', () => { - const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromNodeGroupName', () => { - const result = client.matchProjectFromNodeGroupName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromNodeGroupName', () => { - const result = client.matchRegionFromNodeGroupName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchClusterFromNodeGroupName', () => { - const result = client.matchClusterFromNodeGroupName(fakePath); - assert.strictEqual(result, "clusterValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchNodeGroupFromNodeGroupName', () => { - const result = client.matchNodeGroupFromNodeGroupName(fakePath); - assert.strictEqual(result, "nodeGroupValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - 
.getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationAutoscalingPolicyPath', () => { - const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); - 
assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - workflow_template: "workflowTemplateValue", - }; - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationWorkflowTemplatePath', () => { - const result = client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', 
() => { - const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionAutoscalingPolicyPath', () => { - const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - 
.getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - workflow_template: "workflowTemplateValue", - }; - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionWorkflowTemplatePath', () => { - const result = client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "regionValue"); - 
assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('service', () => { - const fakePath = "/rendered/path/service"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - service: "serviceValue", - }; - const client = new clustercontrollerModule.v1.ClusterControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.servicePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.servicePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('servicePath', () => { - const result = client.servicePath("projectValue", "locationValue", "serviceValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.servicePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromServiceName', () => { - const result = client.matchProjectFromServiceName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.servicePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromServiceName', () => { - const result = client.matchLocationFromServiceName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.servicePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchServiceFromServiceName', () => { - const result = 
client.matchServiceFromServiceName(fakePath); - assert.strictEqual(result, "serviceValue"); - assert((client.pathTemplates.servicePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - }); -}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_job_controller_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_job_controller_v1.ts deleted file mode 100644 index 875a586cb5f..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_job_controller_v1.ts +++ /dev/null @@ -1,1580 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import {describe, it} from 'mocha'; -import * as jobcontrollerModule from '../src'; - -import {PassThrough} from 'stream'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -// Dynamically loaded proto JSON is needed to get the type information -// to fill in default values for request objects -const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function getTypeDefaultValue(typeName: string, fields: string[]) { - let type = root.lookupType(typeName) as protobuf.Type; - for (const field of fields.slice(0, -1)) { - type = type.fields[field]?.resolvedType as protobuf.Type; - } - return type.fields[fields[fields.length - 1]]?.defaultValue; -} - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { - return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? 
sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { - const pagingStub = sinon.stub(); - if (responses) { - for (let i = 0; i < responses.length; ++i) { - pagingStub.onCall(i).callsArgWith(2, null, responses[i]); - } - } - const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub; - const mockStream = new PassThrough({ - objectMode: true, - transform: transformStub, - }); - // trigger as many responses as needed - if (responses) { - for (let i = 0; i < responses.length; ++i) { - setImmediate(() => { mockStream.write({}); }); - } - setImmediate(() => { mockStream.end(); }); - } else { - setImmediate(() => { mockStream.write({}); }); - setImmediate(() => { mockStream.end(); }); - } - return sinon.stub().returns(mockStream); -} - -function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { - let counter = 0; - const asyncIterable = { - [Symbol.asyncIterator]() { - return { - async next() { - if (error) { - return Promise.reject(error); - } - if (counter >= responses!.length) { - return Promise.resolve({done: true, value: undefined}); - } - return Promise.resolve({done: false, value: responses![counter++]}); - } - }; - } - }; - return sinon.stub().returns(asyncIterable); -} - -describe('v1.JobControllerClient', () => { - describe('Common methods', () => { - it('has servicePath', () => { - const servicePath = jobcontrollerModule.v1.JobControllerClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = 
jobcontrollerModule.v1.JobControllerClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = jobcontrollerModule.v1.JobControllerClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new jobcontrollerModule.v1.JobControllerClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.jobControllerStub, undefined); - await client.initialize(); - assert(client.jobControllerStub); - }); - - it('has close method for the initialized client', done => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.jobControllerStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.jobControllerStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - 
assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - }); - - describe('submitJob', () => { - it('invokes submitJob without error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.SubmitJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Job() - ); - client.innerApiCalls.submitJob = stubSimpleCall(expectedResponse); - const [response] = await client.submitJob(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.submitJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.submitJob as SinonStub) - 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes submitJob without error using callback', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.SubmitJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Job() - ); - client.innerApiCalls.submitJob = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.submitJob( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IJob|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.submitJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.submitJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes submitJob with error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = 
generateSampleMessage( - new protos.google.cloud.dataproc.v1.SubmitJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError = new Error('expected'); - client.innerApiCalls.submitJob = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.submitJob(request), expectedError); - const actualRequest = (client.innerApiCalls.submitJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.submitJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes submitJob with closed client', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.SubmitJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); - request.region = defaultValue2; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.submitJob(request), expectedError); - }); - }); - - describe('getJob', () => { - it('invokes getJob without error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 
'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Job() - ); - client.innerApiCalls.getJob = stubSimpleCall(expectedResponse); - const [response] = await client.getJob(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getJob without error using callback', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - 
getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Job() - ); - client.innerApiCalls.getJob = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.getJob( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IJob|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getJob with error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedError = new Error('expected'); - 
client.innerApiCalls.getJob = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.getJob(request), expectedError); - const actualRequest = (client.innerApiCalls.getJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getJob with closed client', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.getJob(request), expectedError); - }); - }); - - describe('updateJob', () => { - it('invokes updateJob without error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - 
getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Job() - ); - client.innerApiCalls.updateJob = stubSimpleCall(expectedResponse); - const [response] = await client.updateJob(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.updateJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateJob without error using callback', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Job() - ); 
- client.innerApiCalls.updateJob = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.updateJob( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IJob|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.updateJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateJob with error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.updateJob = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.updateJob(request), expectedError); - const actualRequest = (client.innerApiCalls.updateJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const 
actualHeaderRequestParams = (client.innerApiCalls.updateJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateJob with closed client', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.updateJob(request), expectedError); - }); - }); - - describe('cancelJob', () => { - it('invokes cancelJob without error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CancelJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = 
`project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Job() - ); - client.innerApiCalls.cancelJob = stubSimpleCall(expectedResponse); - const [response] = await client.cancelJob(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes cancelJob without error using callback', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CancelJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.Job() - ); - client.innerApiCalls.cancelJob = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.cancelJob( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IJob|null) => { - if (err) { - reject(err); - } else { - 
resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes cancelJob with error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CancelJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.cancelJob = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.cancelJob(request), expectedError); - const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes cancelJob with closed client', async () => { - const 
client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CancelJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CancelJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.cancelJob(request), expectedError); - }); - }); - - describe('deleteJob', () => { - it('invokes deleteJob without error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.innerApiCalls.deleteJob = stubSimpleCall(expectedResponse); - const [response] = await client.deleteJob(request); - 
assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteJob without error using callback', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.innerApiCalls.deleteJob = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.deleteJob( - request, - (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = 
(client.innerApiCalls.deleteJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteJob with error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}&job_id=${defaultValue3}`; - const expectedError = new Error('expected'); - client.innerApiCalls.deleteJob = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.deleteJob(request), expectedError); - const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteJob as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteJob with closed client', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteJobRequest() - ); - const defaultValue1 = - 
getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['region']); - request.region = defaultValue2; - const defaultValue3 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteJobRequest', ['jobId']); - request.jobId = defaultValue3; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.deleteJob(request), expectedError); - }); - }); - - describe('submitJobAsOperation', () => { - it('invokes submitJobAsOperation without error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.SubmitJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.submitJobAsOperation = stubLongRunningCall(expectedResponse); - const [operation] = await client.submitJobAsOperation(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.submitJobAsOperation as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.submitJobAsOperation as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes submitJobAsOperation without error using callback', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.SubmitJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.submitJobAsOperation = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.submitJobAsOperation( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.submitJobAsOperation as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.submitJobAsOperation as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes submitJobAsOperation with call error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 
'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.SubmitJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError = new Error('expected'); - client.innerApiCalls.submitJobAsOperation = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.submitJobAsOperation(request), expectedError); - const actualRequest = (client.innerApiCalls.submitJobAsOperation as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.submitJobAsOperation as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes submitJobAsOperation with LRO error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.SubmitJobRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.SubmitJobRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError = new Error('expected'); - client.innerApiCalls.submitJobAsOperation = stubLongRunningCall(undefined, undefined, 
expectedError); - const [operation] = await client.submitJobAsOperation(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.submitJobAsOperation as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.submitJobAsOperation as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkSubmitJobAsOperationProgress without error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkSubmitJobAsOperationProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkSubmitJobAsOperationProgress with error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkSubmitJobAsOperationProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - 
}); - }); - - describe('listJobs', () => { - it('invokes listJobs without error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListJobsRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`;const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - ]; - client.innerApiCalls.listJobs = stubSimpleCall(expectedResponse); - const [response] = await client.listJobs(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.listJobs as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listJobs without error using callback', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListJobsRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); - 
request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`;const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - ]; - client.innerApiCalls.listJobs = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.listJobs( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IJob[]|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.listJobs as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listJobs with error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListJobsRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError 
= new Error('expected'); - client.innerApiCalls.listJobs = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.listJobs(request), expectedError); - const actualRequest = (client.innerApiCalls.listJobs as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listJobsStream without error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListJobsRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - ]; - client.descriptors.page.listJobs.createStream = stubPageStreamingCall(expectedResponse); - const stream = client.listJobsStream(request); - const promise = new Promise((resolve, reject) => { - const responses: protos.google.cloud.dataproc.v1.Job[] = []; - stream.on('data', (response: protos.google.cloud.dataproc.v1.Job) => { - responses.push(response); - }); - stream.on('end', () => { - resolve(responses); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - const responses = await 
promise; - assert.deepStrictEqual(responses, expectedResponse); - assert((client.descriptors.page.listJobs.createStream as SinonStub) - .getCall(0).calledWith(client.innerApiCalls.listJobs, request)); - assert( - (client.descriptors.page.listJobs.createStream as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('invokes listJobsStream with error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListJobsRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError = new Error('expected'); - client.descriptors.page.listJobs.createStream = stubPageStreamingCall(undefined, expectedError); - const stream = client.listJobsStream(request); - const promise = new Promise((resolve, reject) => { - const responses: protos.google.cloud.dataproc.v1.Job[] = []; - stream.on('data', (response: protos.google.cloud.dataproc.v1.Job) => { - responses.push(response); - }); - stream.on('end', () => { - resolve(responses); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - await assert.rejects(promise, expectedError); - assert((client.descriptors.page.listJobs.createStream as SinonStub) - .getCall(0).calledWith(client.innerApiCalls.listJobs, request)); - assert( - (client.descriptors.page.listJobs.createStream as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - 
expectedHeaderRequestParams - ) - ); - }); - - it('uses async iteration with listJobs without error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListJobsRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.Job()), - ]; - client.descriptors.page.listJobs.asyncIterate = stubAsyncIterationCall(expectedResponse); - const responses: protos.google.cloud.dataproc.v1.IJob[] = []; - const iterable = client.listJobsAsync(request); - for await (const resource of iterable) { - responses.push(resource!); - } - assert.deepStrictEqual(responses, expectedResponse); - assert.deepStrictEqual( - (client.descriptors.page.listJobs.asyncIterate as SinonStub) - .getCall(0).args[1], request); - assert( - (client.descriptors.page.listJobs.asyncIterate as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('uses async iteration with listJobs with error', async () => { - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new 
protos.google.cloud.dataproc.v1.ListJobsRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['projectId']); - request.projectId = defaultValue1; - const defaultValue2 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListJobsRequest', ['region']); - request.region = defaultValue2; - const expectedHeaderRequestParams = `project_id=${defaultValue1}®ion=${defaultValue2}`; - const expectedError = new Error('expected'); - client.descriptors.page.listJobs.asyncIterate = stubAsyncIterationCall(undefined, expectedError); - const iterable = client.listJobsAsync(request); - await assert.rejects(async () => { - const responses: protos.google.cloud.dataproc.v1.IJob[] = []; - for await (const resource of iterable) { - responses.push(resource!); - } - }); - assert.deepStrictEqual( - (client.descriptors.page.listJobs.asyncIterate as SinonStub) - .getCall(0).args[1], request); - assert( - (client.descriptors.page.listJobs.asyncIterate as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - }); - - describe('Path templates', () => { - - describe('batch', () => { - const fakePath = "/rendered/path/batch"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - batch: "batchValue", - }; - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.batchPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.batchPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('batchPath', () => { - const result = client.batchPath("projectValue", "locationValue", "batchValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.batchPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - 
it('matchProjectFromBatchName', () => { - const result = client.matchProjectFromBatchName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromBatchName', () => { - const result = client.matchLocationFromBatchName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchBatchFromBatchName', () => { - const result = client.matchBatchFromBatchName(fakePath); - assert.strictEqual(result, "batchValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('nodeGroup', () => { - const fakePath = "/rendered/path/nodeGroup"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - cluster: "clusterValue", - node_group: "nodeGroupValue", - }; - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.nodeGroupPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.nodeGroupPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('nodeGroupPath', () => { - const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromNodeGroupName', () => { - const result = client.matchProjectFromNodeGroupName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - 
it('matchRegionFromNodeGroupName', () => { - const result = client.matchRegionFromNodeGroupName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchClusterFromNodeGroupName', () => { - const result = client.matchClusterFromNodeGroupName(fakePath); - assert.strictEqual(result, "clusterValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchNodeGroupFromNodeGroupName', () => { - const result = client.matchNodeGroupFromNodeGroupName(fakePath); - assert.strictEqual(result, "nodeGroupValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationAutoscalingPolicyPath', () => { - const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { - const result = 
client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - workflow_template: "workflowTemplateValue", - }; - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationWorkflowTemplatePath', () => { - const result = client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) - 
.getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionAutoscalingPolicyPath', () => { - const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); - assert.strictEqual(result, fakePath); - 
assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - workflow_template: "workflowTemplateValue", - }; - const client = new jobcontrollerModule.v1.JobControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionWorkflowTemplatePath', () => { - const result = client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", 
"workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - }); -}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_node_group_controller_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_node_group_controller_v1.ts deleted file mode 100644 index 2bf99b38cff..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_node_group_controller_v1.ts +++ /dev/null @@ -1,983 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import {describe, it} from 'mocha'; -import * as nodegroupcontrollerModule from '../src'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -// Dynamically loaded proto JSON is needed to get the type information -// to fill in default values for request objects -const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function getTypeDefaultValue(typeName: string, fields: string[]) { - let type = root.lookupType(typeName) as protobuf.Type; - for (const field of fields.slice(0, -1)) { - type = type.fields[field]?.resolvedType as protobuf.Type; - } - return type.fields[fields[fields.length - 1]]?.defaultValue; -} - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? 
sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { - return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -describe('v1.NodeGroupControllerClient', () => { - describe('Common methods', () => { - it('has servicePath', () => { - const servicePath = nodegroupcontrollerModule.v1.NodeGroupControllerClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = nodegroupcontrollerModule.v1.NodeGroupControllerClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = nodegroupcontrollerModule.v1.NodeGroupControllerClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new 
nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.nodeGroupControllerStub, undefined); - await client.initialize(); - assert(client.nodeGroupControllerStub); - }); - - it('has close method for the initialized client', done => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.nodeGroupControllerStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.nodeGroupControllerStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else 
{ - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - }); - - describe('getNodeGroup', () => { - it('invokes getNodeGroup without error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetNodeGroupRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.NodeGroup() - ); - client.innerApiCalls.getNodeGroup = stubSimpleCall(expectedResponse); - const [response] = await client.getNodeGroup(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getNodeGroup without error using callback', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetNodeGroupRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = 
generateSampleMessage( - new protos.google.cloud.dataproc.v1.NodeGroup() - ); - client.innerApiCalls.getNodeGroup = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.getNodeGroup( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.INodeGroup|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getNodeGroup with error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetNodeGroupRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.getNodeGroup = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.getNodeGroup(request), expectedError); - const actualRequest = (client.innerApiCalls.getNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - 
}); - - it('invokes getNodeGroup with closed client', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetNodeGroupRequest', ['name']); - request.name = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.getNodeGroup(request), expectedError); - }); - }); - - describe('createNodeGroup', () => { - it('invokes createNodeGroup without error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateNodeGroupRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.createNodeGroup = stubLongRunningCall(expectedResponse); - const [operation] = await client.createNodeGroup(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.createNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createNodeGroup without error using callback', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateNodeGroupRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.createNodeGroup = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.createNodeGroup( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.createNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createNodeGroup with call error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() - ); - const 
defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateNodeGroupRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.createNodeGroup = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.createNodeGroup(request), expectedError); - const actualRequest = (client.innerApiCalls.createNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createNodeGroup with LRO error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateNodeGroupRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.createNodeGroup = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.createNodeGroup(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.createNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkCreateNodeGroupProgress without error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkCreateNodeGroupProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkCreateNodeGroupProgress with error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkCreateNodeGroupProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('resizeNodeGroup', () => { - it('invokes resizeNodeGroup without error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() - ); - const defaultValue1 = - 
getTypeDefaultValue('.google.cloud.dataproc.v1.ResizeNodeGroupRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.resizeNodeGroup = stubLongRunningCall(expectedResponse); - const [operation] = await client.resizeNodeGroup(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.resizeNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.resizeNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes resizeNodeGroup without error using callback', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ResizeNodeGroupRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.resizeNodeGroup = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.resizeNodeGroup( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - 
assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.resizeNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.resizeNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes resizeNodeGroup with call error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ResizeNodeGroupRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.resizeNodeGroup = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.resizeNodeGroup(request), expectedError); - const actualRequest = (client.innerApiCalls.resizeNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.resizeNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes resizeNodeGroup with LRO error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() - ); - const defaultValue1 = - 
getTypeDefaultValue('.google.cloud.dataproc.v1.ResizeNodeGroupRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.resizeNodeGroup = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.resizeNodeGroup(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.resizeNodeGroup as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.resizeNodeGroup as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkResizeNodeGroupProgress without error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkResizeNodeGroupProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkResizeNodeGroupProgress with error', async () => { - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - 
client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkResizeNodeGroupProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('Path templates', () => { - - describe('batch', () => { - const fakePath = "/rendered/path/batch"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - batch: "batchValue", - }; - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.batchPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.batchPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('batchPath', () => { - const result = client.batchPath("projectValue", "locationValue", "batchValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.batchPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromBatchName', () => { - const result = client.matchProjectFromBatchName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromBatchName', () => { - const result = client.matchLocationFromBatchName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchBatchFromBatchName', () => { - const result = client.matchBatchFromBatchName(fakePath); - assert.strictEqual(result, "batchValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - 
describe('clusterRegion', () => { - const fakePath = "/rendered/path/clusterRegion"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - cluster: "clusterValue", - }; - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.clusterRegionPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.clusterRegionPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('clusterRegionPath', () => { - const result = client.clusterRegionPath("projectValue", "regionValue", "clusterValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.clusterRegionPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromClusterRegionName', () => { - const result = client.matchProjectFromClusterRegionName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.clusterRegionPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromClusterRegionName', () => { - const result = client.matchRegionFromClusterRegionName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.clusterRegionPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchClusterFromClusterRegionName', () => { - const result = client.matchClusterFromClusterRegionName(fakePath); - assert.strictEqual(result, "clusterValue"); - assert((client.pathTemplates.clusterRegionPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('nodeGroup', () => { - const fakePath = "/rendered/path/nodeGroup"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - cluster: "clusterValue", - node_group: "nodeGroupValue", - }; - const client = new 
nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.nodeGroupPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.nodeGroupPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('nodeGroupPath', () => { - const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromNodeGroupName', () => { - const result = client.matchProjectFromNodeGroupName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromNodeGroupName', () => { - const result = client.matchRegionFromNodeGroupName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchClusterFromNodeGroupName', () => { - const result = client.matchClusterFromNodeGroupName(fakePath); - assert.strictEqual(result, "clusterValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchNodeGroupFromNodeGroupName', () => { - const result = client.matchNodeGroupFromNodeGroupName(fakePath); - assert.strictEqual(result, "nodeGroupValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('project', () => { - const fakePath = "/rendered/path/project"; - const expectedParameters = { - project: "projectValue", - }; - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - 
credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectPath', () => { - const result = client.projectPath("projectValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectName', () => { - const result = client.matchProjectFromProjectName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationAutoscalingPolicyPath', () => { - const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { - const result = 
client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - workflow_template: "workflowTemplateValue", - }; - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationWorkflowTemplatePath', () => { - const result = client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) - 
.getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionAutoscalingPolicyPath', () => { - const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); - assert.strictEqual(result, 
fakePath); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - workflow_template: "workflowTemplateValue", - }; - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionWorkflowTemplatePath', () => { - const result = 
client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('region', () => { - const fakePath = "/rendered/path/region"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - }; - const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.regionPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.regionPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('regionPath', () => { - const result = client.regionPath("projectValue", "regionValue"); - assert.strictEqual(result, fakePath); - 
assert((client.pathTemplates.regionPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromRegionName', () => { - const result = client.matchProjectFromRegionName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.regionPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromRegionName', () => { - const result = client.matchRegionFromRegionName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.regionPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - }); -}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_workflow_template_service_v1.ts b/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_workflow_template_service_v1.ts deleted file mode 100644 index 72c13b7199a..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/test/gapic_workflow_template_service_v1.ts +++ /dev/null @@ -1,1557 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import {describe, it} from 'mocha'; -import * as workflowtemplateserviceModule from '../src'; - -import {PassThrough} from 'stream'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -// Dynamically loaded proto JSON is needed to get the type information -// to fill in default values for request objects -const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function getTypeDefaultValue(typeName: string, fields: string[]) { - let type = root.lookupType(typeName) as protobuf.Type; - for (const field of fields.slice(0, -1)) { - type = type.fields[field]?.resolvedType as protobuf.Type; - } - return type.fields[fields[fields.length - 1]]?.defaultValue; -} - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { - return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? 
sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { - const pagingStub = sinon.stub(); - if (responses) { - for (let i = 0; i < responses.length; ++i) { - pagingStub.onCall(i).callsArgWith(2, null, responses[i]); - } - } - const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub; - const mockStream = new PassThrough({ - objectMode: true, - transform: transformStub, - }); - // trigger as many responses as needed - if (responses) { - for (let i = 0; i < responses.length; ++i) { - setImmediate(() => { mockStream.write({}); }); - } - setImmediate(() => { mockStream.end(); }); - } else { - setImmediate(() => { mockStream.write({}); }); - setImmediate(() => { mockStream.end(); }); - } - return sinon.stub().returns(mockStream); -} - -function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { - let counter = 0; - const asyncIterable = { - [Symbol.asyncIterator]() { - return { - async next() { - if (error) { - return Promise.reject(error); - } - if (counter >= responses!.length) { - return Promise.resolve({done: true, value: undefined}); - } - return Promise.resolve({done: false, value: responses![counter++]}); - } - }; - } - }; - return sinon.stub().returns(asyncIterable); -} - -describe('v1.WorkflowTemplateServiceClient', () => { - describe('Common methods', () => { - it('has servicePath', () => { - const servicePath = workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - 
const apiEndpoint = workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.workflowTemplateServiceStub, undefined); - await client.initialize(); - assert(client.workflowTemplateServiceStub); - }); - - it('has close method for the initialized client', done => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.workflowTemplateServiceStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.workflowTemplateServiceStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 
'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - }); - - describe('createWorkflowTemplate', () => { - it('invokes createWorkflowTemplate without error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.WorkflowTemplate() - ); - client.innerApiCalls.createWorkflowTemplate = stubSimpleCall(expectedResponse); - const [response] = await client.createWorkflowTemplate(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.createWorkflowTemplate as SinonStub) - 
.getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createWorkflowTemplate without error using callback', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.WorkflowTemplate() - ); - client.innerApiCalls.createWorkflowTemplate = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.createWorkflowTemplate( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IWorkflowTemplate|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.createWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createWorkflowTemplate with error', async () => { - const client = new 
workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.createWorkflowTemplate = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.createWorkflowTemplate(request), expectedError); - const actualRequest = (client.innerApiCalls.createWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.createWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes createWorkflowTemplate with closed client', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest', ['parent']); - request.parent = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.createWorkflowTemplate(request), expectedError); - }); - }); - - describe('getWorkflowTemplate', () => { - it('invokes getWorkflowTemplate without error', async () => { - const client = new 
workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.WorkflowTemplate() - ); - client.innerApiCalls.getWorkflowTemplate = stubSimpleCall(expectedResponse); - const [response] = await client.getWorkflowTemplate(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getWorkflowTemplate without error using callback', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.WorkflowTemplate() - ); - client.innerApiCalls.getWorkflowTemplate = 
stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.getWorkflowTemplate( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IWorkflowTemplate|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.getWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes getWorkflowTemplate with error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.getWorkflowTemplate = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.getWorkflowTemplate(request), expectedError); - const actualRequest = (client.innerApiCalls.getWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.getWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes 
getWorkflowTemplate with closed client', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.GetWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.GetWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.getWorkflowTemplate(request), expectedError); - }); - }); - - describe('updateWorkflowTemplate', () => { - it('invokes updateWorkflowTemplate without error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest() - ); - request.template ??= {}; - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest', ['template', 'name']); - request.template.name = defaultValue1; - const expectedHeaderRequestParams = `template.name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.WorkflowTemplate() - ); - client.innerApiCalls.updateWorkflowTemplate = stubSimpleCall(expectedResponse); - const [response] = await client.updateWorkflowTemplate(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) - 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateWorkflowTemplate without error using callback', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest() - ); - request.template ??= {}; - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest', ['template', 'name']); - request.template.name = defaultValue1; - const expectedHeaderRequestParams = `template.name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.cloud.dataproc.v1.WorkflowTemplate() - ); - client.innerApiCalls.updateWorkflowTemplate = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.updateWorkflowTemplate( - request, - (err?: Error|null, result?: protos.google.cloud.dataproc.v1.IWorkflowTemplate|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateWorkflowTemplate with error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 
'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest() - ); - request.template ??= {}; - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest', ['template', 'name']); - request.template.name = defaultValue1; - const expectedHeaderRequestParams = `template.name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.updateWorkflowTemplate = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.updateWorkflowTemplate(request), expectedError); - const actualRequest = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.updateWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes updateWorkflowTemplate with closed client', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest() - ); - request.template ??= {}; - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest', ['template', 'name']); - request.template.name = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.updateWorkflowTemplate(request), expectedError); - }); - }); - - describe('deleteWorkflowTemplate', () => { - it('invokes deleteWorkflowTemplate without error', async () => { - const client = new 
workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.innerApiCalls.deleteWorkflowTemplate = stubSimpleCall(expectedResponse); - const [response] = await client.deleteWorkflowTemplate(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteWorkflowTemplate without error using callback', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.protobuf.Empty() - ); - client.innerApiCalls.deleteWorkflowTemplate = stubSimpleCallWithCallback(expectedResponse); - 
const promise = new Promise((resolve, reject) => { - client.deleteWorkflowTemplate( - request, - (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteWorkflowTemplate with error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.deleteWorkflowTemplate = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.deleteWorkflowTemplate(request), expectedError); - const actualRequest = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.deleteWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes deleteWorkflowTemplate with closed client', async 
() => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.deleteWorkflowTemplate(request), expectedError); - }); - }); - - describe('instantiateWorkflowTemplate', () => { - it('invokes instantiateWorkflowTemplate without error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.instantiateWorkflowTemplate = stubLongRunningCall(expectedResponse); - const [operation] = await client.instantiateWorkflowTemplate(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) - 
.getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes instantiateWorkflowTemplate without error using callback', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.instantiateWorkflowTemplate = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.instantiateWorkflowTemplate( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes instantiateWorkflowTemplate with call error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - 
projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.instantiateWorkflowTemplate = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.instantiateWorkflowTemplate(request), expectedError); - const actualRequest = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes instantiateWorkflowTemplate with LRO error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest', ['name']); - request.name = defaultValue1; - const expectedHeaderRequestParams = `name=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.instantiateWorkflowTemplate = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.instantiateWorkflowTemplate(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = 
(client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.instantiateWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkInstantiateWorkflowTemplateProgress without error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkInstantiateWorkflowTemplateProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkInstantiateWorkflowTemplateProgress with error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkInstantiateWorkflowTemplateProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('instantiateInlineWorkflowTemplate', () => { - it('invokes 
instantiateInlineWorkflowTemplate without error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.instantiateInlineWorkflowTemplate = stubLongRunningCall(expectedResponse); - const [operation] = await client.instantiateInlineWorkflowTemplate(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes instantiateInlineWorkflowTemplate without error using callback', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest', ['parent']); - request.parent = defaultValue1; 
- const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = generateSampleMessage( - new protos.google.longrunning.Operation() - ); - client.innerApiCalls.instantiateInlineWorkflowTemplate = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.instantiateInlineWorkflowTemplate( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes instantiateInlineWorkflowTemplate with call error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.instantiateInlineWorkflowTemplate = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.instantiateInlineWorkflowTemplate(request), expectedError); - const actualRequest = 
(client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes instantiateInlineWorkflowTemplate with LRO error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.instantiateInlineWorkflowTemplate = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.instantiateInlineWorkflowTemplate(request); - await assert.rejects(operation.promise(), expectedError); - const actualRequest = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.instantiateInlineWorkflowTemplate as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes checkInstantiateInlineWorkflowTemplateProgress without error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - 
projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage( - new operationsProtos.google.longrunning.Operation() - ); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkInstantiateInlineWorkflowTemplateProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkInstantiateInlineWorkflowTemplateProgress with error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkInstantiateInlineWorkflowTemplateProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); - - describe('listWorkflowTemplates', () => { - it('invokes listWorkflowTemplates without error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ - 
generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - ]; - client.innerApiCalls.listWorkflowTemplates = stubSimpleCall(expectedResponse); - const [response] = await client.listWorkflowTemplates(request); - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.listWorkflowTemplates as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listWorkflowTemplates as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listWorkflowTemplates without error using callback', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`;const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - ]; - client.innerApiCalls.listWorkflowTemplates = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.listWorkflowTemplates( - request, - (err?: Error|null, result?: 
protos.google.cloud.dataproc.v1.IWorkflowTemplate[]|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - const actualRequest = (client.innerApiCalls.listWorkflowTemplates as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listWorkflowTemplates as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listWorkflowTemplates with error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.innerApiCalls.listWorkflowTemplates = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.listWorkflowTemplates(request), expectedError); - const actualRequest = (client.innerApiCalls.listWorkflowTemplates as SinonStub) - .getCall(0).args[0]; - assert.deepStrictEqual(actualRequest, request); - const actualHeaderRequestParams = (client.innerApiCalls.listWorkflowTemplates as SinonStub) - .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; - assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); - }); - - it('invokes listWorkflowTemplatesStream without error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - 
credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = [ - generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - ]; - client.descriptors.page.listWorkflowTemplates.createStream = stubPageStreamingCall(expectedResponse); - const stream = client.listWorkflowTemplatesStream(request); - const promise = new Promise((resolve, reject) => { - const responses: protos.google.cloud.dataproc.v1.WorkflowTemplate[] = []; - stream.on('data', (response: protos.google.cloud.dataproc.v1.WorkflowTemplate) => { - responses.push(response); - }); - stream.on('end', () => { - resolve(responses); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - const responses = await promise; - assert.deepStrictEqual(responses, expectedResponse); - assert((client.descriptors.page.listWorkflowTemplates.createStream as SinonStub) - .getCall(0).calledWith(client.innerApiCalls.listWorkflowTemplates, request)); - assert( - (client.descriptors.page.listWorkflowTemplates.createStream as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('invokes listWorkflowTemplatesStream with error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request 
= generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.descriptors.page.listWorkflowTemplates.createStream = stubPageStreamingCall(undefined, expectedError); - const stream = client.listWorkflowTemplatesStream(request); - const promise = new Promise((resolve, reject) => { - const responses: protos.google.cloud.dataproc.v1.WorkflowTemplate[] = []; - stream.on('data', (response: protos.google.cloud.dataproc.v1.WorkflowTemplate) => { - responses.push(response); - }); - stream.on('end', () => { - resolve(responses); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - await assert.rejects(promise, expectedError); - assert((client.descriptors.page.listWorkflowTemplates.createStream as SinonStub) - .getCall(0).calledWith(client.innerApiCalls.listWorkflowTemplates, request)); - assert( - (client.descriptors.page.listWorkflowTemplates.createStream as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('uses async iteration with listWorkflowTemplates without error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedResponse = [ - generateSampleMessage(new 
protos.google.cloud.dataproc.v1.WorkflowTemplate()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - generateSampleMessage(new protos.google.cloud.dataproc.v1.WorkflowTemplate()), - ]; - client.descriptors.page.listWorkflowTemplates.asyncIterate = stubAsyncIterationCall(expectedResponse); - const responses: protos.google.cloud.dataproc.v1.IWorkflowTemplate[] = []; - const iterable = client.listWorkflowTemplatesAsync(request); - for await (const resource of iterable) { - responses.push(resource!); - } - assert.deepStrictEqual(responses, expectedResponse); - assert.deepStrictEqual( - (client.descriptors.page.listWorkflowTemplates.asyncIterate as SinonStub) - .getCall(0).args[1], request); - assert( - (client.descriptors.page.listWorkflowTemplates.asyncIterate as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - - it('uses async iteration with listWorkflowTemplates with error', async () => { - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage( - new protos.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest() - ); - const defaultValue1 = - getTypeDefaultValue('.google.cloud.dataproc.v1.ListWorkflowTemplatesRequest', ['parent']); - request.parent = defaultValue1; - const expectedHeaderRequestParams = `parent=${defaultValue1}`; - const expectedError = new Error('expected'); - client.descriptors.page.listWorkflowTemplates.asyncIterate = stubAsyncIterationCall(undefined, expectedError); - const iterable = client.listWorkflowTemplatesAsync(request); - await assert.rejects(async () => { - const responses: protos.google.cloud.dataproc.v1.IWorkflowTemplate[] = []; - for await (const resource of iterable) { - responses.push(resource!); - } - }); - assert.deepStrictEqual( - 
(client.descriptors.page.listWorkflowTemplates.asyncIterate as SinonStub) - .getCall(0).args[1], request); - assert( - (client.descriptors.page.listWorkflowTemplates.asyncIterate as SinonStub) - .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( - expectedHeaderRequestParams - ) - ); - }); - }); - - describe('Path templates', () => { - - describe('batch', () => { - const fakePath = "/rendered/path/batch"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - batch: "batchValue", - }; - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.batchPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.batchPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('batchPath', () => { - const result = client.batchPath("projectValue", "locationValue", "batchValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.batchPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromBatchName', () => { - const result = client.matchProjectFromBatchName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromBatchName', () => { - const result = client.matchLocationFromBatchName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchBatchFromBatchName', () => { - const result = client.matchBatchFromBatchName(fakePath); - assert.strictEqual(result, "batchValue"); - assert((client.pathTemplates.batchPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - 
describe('nodeGroup', () => { - const fakePath = "/rendered/path/nodeGroup"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - cluster: "clusterValue", - node_group: "nodeGroupValue", - }; - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.nodeGroupPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.nodeGroupPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('nodeGroupPath', () => { - const result = client.nodeGroupPath("projectValue", "regionValue", "clusterValue", "nodeGroupValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromNodeGroupName', () => { - const result = client.matchProjectFromNodeGroupName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromNodeGroupName', () => { - const result = client.matchRegionFromNodeGroupName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchClusterFromNodeGroupName', () => { - const result = client.matchClusterFromNodeGroupName(fakePath); - assert.strictEqual(result, "clusterValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchNodeGroupFromNodeGroupName', () => { - const result = client.matchNodeGroupFromNodeGroupName(fakePath); - assert.strictEqual(result, "nodeGroupValue"); - assert((client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) - 
.getCall(-1).calledWith(fakePath)); - }); - }); - - describe('project', () => { - const fakePath = "/rendered/path/project"; - const expectedParameters = { - project: "projectValue", - }; - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectPath', () => { - const result = client.projectPath("projectValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectName', () => { - const result = client.matchProjectFromProjectName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectLocationAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationAutoscalingPolicyPath', () => { - const result = client.projectLocationAutoscalingPolicyPath("projectValue", "locationValue", "autoscalingPolicyValue"); - assert.strictEqual(result, 
fakePath); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchLocationFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectLocationWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectLocationWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - workflow_template: "workflowTemplateValue", - }; - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectLocationWorkflowTemplatePath', () => { - const result = 
client.projectLocationWorkflowTemplatePath("projectValue", "locationValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionAutoscalingPolicy', () => { - const fakePath = "/rendered/path/projectRegionAutoscalingPolicy"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - autoscaling_policy: "autoscalingPolicyValue", - }; - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = - 
sinon.stub().returns(expectedParameters); - - it('projectRegionAutoscalingPolicyPath', () => { - const result = client.projectRegionAutoscalingPolicyPath("projectValue", "regionValue", "autoscalingPolicyValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { - const result = client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(fakePath); - assert.strictEqual(result, "autoscalingPolicyValue"); - assert((client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('projectRegionWorkflowTemplate', () => { - const fakePath = "/rendered/path/projectRegionWorkflowTemplate"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - workflow_template: "workflowTemplateValue", - }; - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = - 
sinon.stub().returns(fakePath); - client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectRegionWorkflowTemplatePath', () => { - const result = client.projectRegionWorkflowTemplatePath("projectValue", "regionValue", "workflowTemplateValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { - const result = client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(fakePath); - assert.strictEqual(result, "workflowTemplateValue"); - assert((client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('region', () => { - const fakePath = "/rendered/path/region"; - const expectedParameters = { - project: "projectValue", - region: "regionValue", - }; - const client = new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.regionPathTemplate.render = - sinon.stub().returns(fakePath); - 
client.pathTemplates.regionPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('regionPath', () => { - const result = client.regionPath("projectValue", "regionValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.regionPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromRegionName', () => { - const result = client.matchProjectFromRegionName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.regionPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchRegionFromRegionName', () => { - const result = client.matchRegionFromRegionName(fakePath); - assert.strictEqual(result, "regionValue"); - assert((client.pathTemplates.regionPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - }); -}); diff --git a/owl-bot-staging/google-cloud-dataproc/v1/tsconfig.json b/owl-bot-staging/google-cloud-dataproc/v1/tsconfig.json deleted file mode 100644 index c78f1c884ef..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/tsconfig.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "extends": "./node_modules/gts/tsconfig-google.json", - "compilerOptions": { - "rootDir": ".", - "outDir": "build", - "resolveJsonModule": true, - "lib": [ - "es2018", - "dom" - ] - }, - "include": [ - "src/*.ts", - "src/**/*.ts", - "test/*.ts", - "test/**/*.ts", - "system-test/*.ts" - ] -} diff --git a/owl-bot-staging/google-cloud-dataproc/v1/webpack.config.js b/owl-bot-staging/google-cloud-dataproc/v1/webpack.config.js deleted file mode 100644 index 050b272c0e0..00000000000 --- a/owl-bot-staging/google-cloud-dataproc/v1/webpack.config.js +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -const path = require('path'); - -module.exports = { - entry: './src/index.ts', - output: { - library: 'dataproc', - filename: './dataproc.js', - }, - node: { - child_process: 'empty', - fs: 'empty', - crypto: 'empty', - }, - resolve: { - alias: { - '../../../package.json': path.resolve(__dirname, 'package.json'), - }, - extensions: ['.js', '.json', '.ts'], - }, - module: { - rules: [ - { - test: /\.tsx?$/, - use: 'ts-loader', - exclude: /node_modules/ - }, - { - test: /node_modules[\\/]@grpc[\\/]grpc-js/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]grpc/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]retry-request/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]https?-proxy-agent/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]gtoken/, - use: 'null-loader' - }, - ], - }, - mode: 'production', -}; diff --git a/packages/google-cloud-dataproc/README.md b/packages/google-cloud-dataproc/README.md index 1733e648544..93bf7cbb2e8 100644 --- a/packages/google-cloud-dataproc/README.md +++ b/packages/google-cloud-dataproc/README.md @@ -200,6 +200,9 @@ Samples are in the [`samples/`](https://github.com/googleapis/google-cloud-node/ | Job_controller.submit_job | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/job_controller.submit_job.js) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/job_controller.submit_job.js,samples/README.md) | | Job_controller.submit_job_as_operation | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/job_controller.submit_job_as_operation.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/job_controller.submit_job_as_operation.js,samples/README.md) | | Job_controller.update_job | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/job_controller.update_job.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/job_controller.update_job.js,samples/README.md) | +| Node_group_controller.create_node_group | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.create_node_group.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.create_node_group.js,samples/README.md) | +| Node_group_controller.get_node_group | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.get_node_group.js) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.get_node_group.js,samples/README.md) | +| Node_group_controller.resize_node_group | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.resize_node_group.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.resize_node_group.js,samples/README.md) | | Workflow_template_service.create_workflow_template | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/workflow_template_service.create_workflow_template.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/workflow_template_service.create_workflow_template.js,samples/README.md) | | Workflow_template_service.delete_workflow_template | [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/workflow_template_service.delete_workflow_template.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/workflow_template_service.delete_workflow_template.js,samples/README.md) | | Workflow_template_service.get_workflow_template | [source 
code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/workflow_template_service.get_workflow_template.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/workflow_template_service.get_workflow_template.js,samples/README.md) | diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/clusters.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/clusters.proto index 9550278b999..11611fbf680 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/clusters.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/clusters.proto @@ -43,12 +43,14 @@ option (google.api.resource_definition) = { // of Compute Engine instances. service ClusterController { option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; // Creates a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { + rpc CreateCluster(CreateClusterRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/clusters" body: "cluster" @@ -63,14 +65,17 @@ service ClusterController { // Updates a cluster in a project. 
The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - // The cluster must be in a [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error + // The cluster must be in a + // [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error // is returned. - rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) { + rpc UpdateCluster(UpdateClusterRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" body: "cluster" }; - option (google.api.method_signature) = "project_id,region,cluster_name,cluster,update_mask"; + option (google.api.method_signature) = + "project_id,region,cluster_name,cluster,update_mask"; option (google.longrunning.operation_info) = { response_type: "Cluster" metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" @@ -104,7 +109,8 @@ service ClusterController { // Deletes a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) { + rpc DeleteCluster(DeleteClusterRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { delete: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" }; @@ -139,7 +145,8 @@ service ClusterController { // [Operation.response][google.longrunning.Operation.response] // contains // [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). 
- rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) { + rpc DiagnoseCluster(DiagnoseClusterRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose" body: "*" @@ -158,8 +165,10 @@ message Cluster { // Required. The Google Cloud Platform project ID that the cluster belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The cluster name. Cluster names within a project must be - // unique. Names of deleted clusters can be reused. + // Required. The cluster name, which must be unique within a project. + // The name must start with a lowercase letter, and can contain + // up to 51 lowercase letters, numbers, and hyphens. It cannot end + // with a hyphen. The name of a deleted cluster can be reused. string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; // Optional. The cluster config for a cluster of Compute Engine Instances. @@ -167,14 +176,17 @@ message Cluster { // when clusters are updated. ClusterConfig config = 3 [(google.api.field_behavior) = OPTIONAL]; - // Optional. The virtual cluster config, used when creating a Dataproc cluster that - // does not directly control the underlying compute resources, for example, - // when creating a [Dataproc-on-GKE - // cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster). - // Note that Dataproc may set default values, and values may change when - // clusters are updated. Exactly one of config or virtualClusterConfig must be - // specified. - VirtualClusterConfig virtual_cluster_config = 10 [(google.api.field_behavior) = OPTIONAL]; + // Optional. 
The virtual cluster config is used when creating a Dataproc + // cluster that does not directly control the underlying compute resources, + // for example, when creating a [Dataproc-on-GKE + // cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke). + // Dataproc may set default values, and values may change when + // clusters are updated. Exactly one of + // [config][google.cloud.dataproc.v1.Cluster.config] or + // [virtual_cluster_config][google.cloud.dataproc.v1.Cluster.virtual_cluster_config] + // must be specified. + VirtualClusterConfig virtual_cluster_config = 10 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The labels to associate with this cluster. // Label **keys** must contain 1 to 63 characters, and must conform to @@ -189,7 +201,8 @@ message Cluster { ClusterStatus status = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The previous cluster status. - repeated ClusterStatus status_history = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated ClusterStatus status_history = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. A cluster UUID (Unique Universal Identifier). Dataproc // generates this value when it creates the cluster. @@ -217,15 +230,13 @@ message ClusterConfig { // a Cloud Storage bucket.** string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; - // Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, - // such as Spark and MapReduce history files. - // If you do not specify a temp bucket, - // Dataproc will determine a Cloud Storage location (US, - // ASIA, or EU) for your cluster's temp bucket according to the - // Compute Engine zone where your cluster is deployed, and then create - // and manage this project-level, per-location bucket. The default bucket has - // a TTL of 90 days, but you can use any TTL (or none) if you specify a - // bucket (see + // Optional. 
A Cloud Storage bucket used to store ephemeral cluster and jobs + // data, such as Spark and MapReduce history files. If you do not specify a + // temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or + // EU) for your cluster's temp bucket according to the Compute Engine zone + // where your cluster is deployed, and then create and manage this + // project-level, per-location bucket. The default bucket has a TTL of 90 + // days, but you can use any TTL (or none) if you specify a bucket (see // [Dataproc staging and temp // buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). // **This field requires a Cloud Storage bucket name, not a `gs://...` URI to @@ -234,19 +245,23 @@ message ClusterConfig { // Optional. The shared Compute Engine config settings for // all instances in a cluster. - GceClusterConfig gce_cluster_config = 8 [(google.api.field_behavior) = OPTIONAL]; + GceClusterConfig gce_cluster_config = 8 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The Compute Engine config settings for // the cluster's master instance. - InstanceGroupConfig master_config = 9 [(google.api.field_behavior) = OPTIONAL]; + InstanceGroupConfig master_config = 9 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The Compute Engine config settings for // the cluster's worker instances. - InstanceGroupConfig worker_config = 10 [(google.api.field_behavior) = OPTIONAL]; + InstanceGroupConfig worker_config = 10 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The Compute Engine config settings for // a cluster's secondary worker instances - InstanceGroupConfig secondary_worker_config = 12 [(google.api.field_behavior) = OPTIONAL]; + InstanceGroupConfig secondary_worker_config = 12 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The config settings for cluster software. 
SoftwareConfig software_config = 13 [(google.api.field_behavior) = OPTIONAL]; @@ -264,36 +279,46 @@ message ClusterConfig { // else // ... worker specific actions ... // fi - repeated NodeInitializationAction initialization_actions = 11 [(google.api.field_behavior) = OPTIONAL]; + repeated NodeInitializationAction initialization_actions = 11 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Encryption settings for the cluster. - EncryptionConfig encryption_config = 15 [(google.api.field_behavior) = OPTIONAL]; + EncryptionConfig encryption_config = 15 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Autoscaling config for the policy associated with the cluster. // Cluster does not autoscale if this field is unset. - AutoscalingConfig autoscaling_config = 18 [(google.api.field_behavior) = OPTIONAL]; + AutoscalingConfig autoscaling_config = 18 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Security settings for the cluster. SecurityConfig security_config = 16 [(google.api.field_behavior) = OPTIONAL]; // Optional. Lifecycle setting for the cluster. - LifecycleConfig lifecycle_config = 17 [(google.api.field_behavior) = OPTIONAL]; + LifecycleConfig lifecycle_config = 17 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Port/endpoint configuration for this cluster EndpointConfig endpoint_config = 19 [(google.api.field_behavior) = OPTIONAL]; // Optional. Metastore configuration. - MetastoreConfig metastore_config = 20 [(google.api.field_behavior) = OPTIONAL]; + MetastoreConfig metastore_config = 20 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The config for Dataproc metrics. - DataprocMetricConfig dataproc_metric_config = 23 [(google.api.field_behavior) = OPTIONAL]; + DataprocMetricConfig dataproc_metric_config = 23 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The node group settings. 
+ repeated AuxiliaryNodeGroup auxiliary_node_groups = 25 + [(google.api.field_behavior) = OPTIONAL]; } -// Dataproc cluster config for a cluster that does not directly control the +// The Dataproc cluster config for a cluster that does not directly control the // underlying compute resources, such as a [Dataproc-on-GKE -// cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster). +// cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke). message VirtualClusterConfig { - // Optional. A Storage bucket used to stage job + // Optional. A Cloud Storage bucket used to stage job // dependencies, config files, and job driver console output. // If you do not specify a staging bucket, Cloud // Dataproc will determine a Cloud Storage location (US, @@ -307,12 +332,15 @@ message VirtualClusterConfig { string staging_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; oneof infrastructure_config { - // Required. The configuration for running the Dataproc cluster on Kubernetes. - KubernetesClusterConfig kubernetes_cluster_config = 6 [(google.api.field_behavior) = REQUIRED]; + // Required. The configuration for running the Dataproc cluster on + // Kubernetes. + KubernetesClusterConfig kubernetes_cluster_config = 6 + [(google.api.field_behavior) = REQUIRED]; } // Optional. Configuration of auxiliary services used by this cluster. - AuxiliaryServicesConfig auxiliary_services_config = 7 [(google.api.field_behavior) = OPTIONAL]; + AuxiliaryServicesConfig auxiliary_services_config = 7 + [(google.api.field_behavior) = OPTIONAL]; } // Auxiliary services configuration for a Cluster. @@ -321,14 +349,16 @@ message AuxiliaryServicesConfig { MetastoreConfig metastore_config = 1 [(google.api.field_behavior) = OPTIONAL]; // Optional. The Spark History Server configuration for the workload. 
- SparkHistoryServerConfig spark_history_server_config = 2 [(google.api.field_behavior) = OPTIONAL]; + SparkHistoryServerConfig spark_history_server_config = 2 + [(google.api.field_behavior) = OPTIONAL]; } // Endpoint config for this cluster message EndpointConfig { // Output only. The map of port descriptions to URLs. Will only be populated // if enable_http_port_access is true. - map http_ports = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + map http_ports = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. If true, enable http access to specific ports on the cluster // from external sources. Defaults to false. @@ -366,7 +396,8 @@ message GceClusterConfig { // fields](https://cloud.google.com/compute/docs/reference/rest/v1/instances). enum PrivateIpv6GoogleAccess { // If unspecified, Compute Engine default behavior will apply, which - // is the same as [INHERIT_FROM_SUBNETWORK][google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.INHERIT_FROM_SUBNETWORK]. + // is the same as + // [INHERIT_FROM_SUBNETWORK][google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.INHERIT_FROM_SUBNETWORK]. PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED = 0; // Private access to and from Google Services configuration @@ -429,7 +460,8 @@ message GceClusterConfig { bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL]; // Optional. The type of IPv6 access for a cluster. - PrivateIpv6GoogleAccess private_ipv6_google_access = 12 [(google.api.field_behavior) = OPTIONAL]; + PrivateIpv6GoogleAccess private_ipv6_google_access = 12 + [(google.api.field_behavior) = OPTIONAL]; // Optional. 
The [Dataproc service // account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) @@ -458,7 +490,8 @@ message GceClusterConfig { // * https://www.googleapis.com/auth/bigtable.admin.table // * https://www.googleapis.com/auth/bigtable.data // * https://www.googleapis.com/auth/devstorage.full_control - repeated string service_account_scopes = 3 [(google.api.field_behavior) = OPTIONAL]; + repeated string service_account_scopes = 3 + [(google.api.field_behavior) = OPTIONAL]; // The Compute Engine tags to add to all instances (see [Tagging // instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). @@ -470,21 +503,28 @@ message GceClusterConfig { map metadata = 5; // Optional. Reservation Affinity for consuming Zonal reservation. - ReservationAffinity reservation_affinity = 11 [(google.api.field_behavior) = OPTIONAL]; + ReservationAffinity reservation_affinity = 11 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Node Group Affinity for sole-tenant clusters. - NodeGroupAffinity node_group_affinity = 13 [(google.api.field_behavior) = OPTIONAL]; + NodeGroupAffinity node_group_affinity = 13 + [(google.api.field_behavior) = OPTIONAL]; - // Optional. Shielded Instance Config for clusters using [Compute Engine Shielded + // Optional. Shielded Instance Config for clusters using [Compute Engine + // Shielded // VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm). - ShieldedInstanceConfig shielded_instance_config = 14 [(google.api.field_behavior) = OPTIONAL]; + ShieldedInstanceConfig shielded_instance_config = 14 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Confidential Instance Config for clusters using [Confidential // VMs](https://cloud.google.com/compute/confidential-vm/docs). 
- ConfidentialInstanceConfig confidential_instance_config = 15 [(google.api.field_behavior) = OPTIONAL]; + ConfidentialInstanceConfig confidential_instance_config = 15 + [(google.api.field_behavior) = OPTIONAL]; } // Node Group Affinity for clusters using sole-tenant node groups. +// **The Dataproc `NodeGroupAffinity` resource is not related to the +// Dataproc [NodeGroup][google.cloud.dataproc.v1.NodeGroup] resource.** message NodeGroupAffinity { // Required. The URI of a // sole-tenant [node group @@ -515,17 +555,15 @@ message ShieldedInstanceConfig { // Confidential Instance Config for clusters using [Confidential // VMs](https://cloud.google.com/compute/confidential-vm/docs) message ConfidentialInstanceConfig { - // Optional. Defines whether the instance should have confidential compute enabled. + // Optional. Defines whether the instance should have confidential compute + // enabled. bool enable_confidential_compute = 1 [(google.api.field_behavior) = OPTIONAL]; } // The config settings for Compute Engine resources in // an instance group, such as a master or worker group. message InstanceGroupConfig { - // Controls the use of - // [preemptible instances] - // (https://cloud.google.com/compute/docs/instances/preemptible) - // within the group. + // Controls the use of preemptible instances within the group. enum Preemptibility { // Preemptibility is unspecified, the system will choose the // appropriate setting for each instance group. @@ -537,9 +575,12 @@ message InstanceGroupConfig { // value for Master and Worker instance groups. NON_PREEMPTIBLE = 1; - // Instances are preemptible. + // Instances are [preemptible] + // (https://cloud.google.com/compute/docs/instances/preemptible). // - // This option is allowed only for secondary worker groups. + // This option is allowed only for [secondary worker] + // (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) + // groups. 
PREEMPTIBLE = 2; } @@ -553,7 +594,8 @@ message InstanceGroupConfig { // Output only. The list of instance names. Dataproc derives the names // from `cluster_name`, `num_instances`, and the instance group. - repeated string instance_names = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated string instance_names = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. The Compute Engine image resource used for cluster instances. // @@ -609,11 +651,13 @@ message InstanceGroupConfig { // Output only. The config for Compute Engine Instance Group // Manager that manages this group. // This is only used for preemptible instance groups. - ManagedGroupConfig managed_group_config = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + ManagedGroupConfig managed_group_config = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. The Compute Engine accelerator configuration for these // instances. - repeated AcceleratorConfig accelerators = 8 [(google.api.field_behavior) = OPTIONAL]; + repeated AcceleratorConfig accelerators = 8 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Specifies the minimum cpu platform for the Instance Group. // See [Dataproc -> Minimum CPU @@ -628,7 +672,8 @@ message ManagedGroupConfig { string instance_template_name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The name of the Instance Group Manager for this group. - string instance_group_manager_name = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + string instance_group_manager_name = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // Specifies the type and number of accelerator cards attached to the instances @@ -669,7 +714,7 @@ message DiskConfig { // Optional. Size in GB of the boot disk (default is 500GB). int32 boot_disk_size_gb = 1 [(google.api.field_behavior) = OPTIONAL]; - // Optional. Number of attached SSDs, from 0 to 4 (default is 0). + // Optional. Number of attached SSDs, from 0 to 8 (default is 0). 
// If SSDs are not attached, the boot disk is used to store runtime logs and // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. // If one or more SSDs are attached, this runtime bulk @@ -685,6 +730,59 @@ message DiskConfig { string local_ssd_interface = 4 [(google.api.field_behavior) = OPTIONAL]; } +// Node group identification and configuration information. +message AuxiliaryNodeGroup { + // Required. Node group configuration. + NodeGroup node_group = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A node group ID. Generated if not specified. + // + // The ID must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). Cannot begin or end with underscore + // or hyphen. Must consist of from 3 to 33 characters. + string node_group_id = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// Dataproc Node Group. +// **The Dataproc `NodeGroup` resource is not related to the +// Dataproc [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] +// resource.** +message NodeGroup { + option (google.api.resource) = { + type: "dataproc.googleapis.com/NodeGroup" + pattern: "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}" + }; + + // Node group roles. + enum Role { + // Required unspecified role. + ROLE_UNSPECIFIED = 0; + + // Job drivers run on the node group. + DRIVER = 1; + } + + // The Node group [resource name](https://aip.dev/122). + string name = 1; + + // Required. Node group roles. + repeated Role roles = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The node group instance group configuration. + InstanceGroupConfig node_group_config = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Node group labels. + // + // * Label **keys** must consist of from 1 to 63 characters and conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // * Label **values** can be empty. 
If specified, they must consist of from + // 1 to 63 characters and conform to [RFC 1035] + // (https://www.ietf.org/rfc/rfc1035.txt). + // * The node group must have no more than 32 labels. + map labels = 4 [(google.api.field_behavior) = OPTIONAL]; +} + // Specifies an executable to run on a fully configured node and a // timeout period for executable completion. message NodeInitializationAction { @@ -698,7 +796,8 @@ message NodeInitializationAction { // Cluster creation fails with an explanatory error message (the // name of the executable that caused the error and the exceeded timeout // period) if the executable is not completed at end of the timeout period. - google.protobuf.Duration execution_timeout = 2 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration execution_timeout = 2 + [(google.api.field_behavior) = OPTIONAL]; } // The status of a cluster and its instances. @@ -771,7 +870,8 @@ message ClusterStatus { // Output only. Time when this state was entered (see JSON representation of // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Timestamp state_start_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp state_start_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Additional state information that includes // status reported by the agent. @@ -790,13 +890,14 @@ message SecurityConfig { // Specifies Kerberos related configuration. message KerberosConfig { - // Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set - // this field to true to enable Kerberos on a cluster. + // Optional. Flag to indicate whether to Kerberize the cluster (default: + // false). Set this field to true to enable Kerberos on a cluster. bool enable_kerberos = 1 [(google.api.field_behavior) = OPTIONAL]; // Optional. The Cloud Storage URI of a KMS encrypted file containing the root // principal password. 
- string root_principal_password_uri = 2 [(google.api.field_behavior) = OPTIONAL]; + string root_principal_password_uri = 2 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The uri of the KMS key used to encrypt various sensitive // files. @@ -837,12 +938,14 @@ message KerberosConfig { // Optional. The admin server (IP or hostname) for the remote trusted realm in // a cross realm trust relationship. - string cross_realm_trust_admin_server = 11 [(google.api.field_behavior) = OPTIONAL]; + string cross_realm_trust_admin_server = 11 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The Cloud Storage URI of a KMS encrypted file containing the // shared password between the on-cluster Kerberos realm and the remote // trusted realm, in a cross realm trust relationship. - string cross_realm_trust_shared_password_uri = 12 [(google.api.field_behavior) = OPTIONAL]; + string cross_realm_trust_shared_password_uri = 12 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The Cloud Storage URI of a KMS encrypted file containing the // master key of the KDC database. @@ -862,7 +965,8 @@ message KerberosConfig { // secure multi-tenancy user mappings. message IdentityConfig { // Required. Map of user to service account. - map user_service_account_mapping = 1 [(google.api.field_behavior) = REQUIRED]; + map user_service_account_mapping = 1 + [(google.api.field_behavior) = REQUIRED]; } // Specifies the selection and config of software inside the cluster. @@ -897,7 +1001,8 @@ message SoftwareConfig { map properties = 2 [(google.api.field_behavior) = OPTIONAL]; // Optional. The set of components to activate on the cluster. - repeated Component optional_components = 3 [(google.api.field_behavior) = OPTIONAL]; + repeated Component optional_components = 3 + [(google.api.field_behavior) = OPTIONAL]; } // Specifies the cluster auto-delete schedule configuration. @@ -907,27 +1012,32 @@ message LifecycleConfig { // deleted. 
Minimum value is 5 minutes; maximum value is 14 days (see JSON // representation of // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Duration idle_delete_ttl = 1 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration idle_delete_ttl = 1 + [(google.api.field_behavior) = OPTIONAL]; // Either the exact time the cluster should be deleted at or // the cluster maximum age. oneof ttl { - // Optional. The time when cluster will be auto-deleted (see JSON representation of + // Optional. The time when cluster will be auto-deleted (see JSON + // representation of // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Timestamp auto_delete_time = 2 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Timestamp auto_delete_time = 2 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The lifetime duration of cluster. The cluster will be // auto-deleted at the end of this period. Minimum value is 10 minutes; // maximum value is 14 days (see JSON representation of // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Duration auto_delete_ttl = 3 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration auto_delete_ttl = 3 + [(google.api.field_behavior) = OPTIONAL]; } // Output only. The time when cluster became idle (most recent job finished) // and became eligible for deletion due to idleness (see JSON representation // of // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). - google.protobuf.Timestamp idle_start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp idle_start_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // Specifies a Metastore configuration. @@ -945,7 +1055,6 @@ message MetastoreConfig { ]; } - // Dataproc metric config. 
message DataprocMetricConfig { // A source for the collection of Dataproc OSS metrics (see [available OSS @@ -1014,7 +1123,8 @@ message DataprocMetricConfig { // sources is unaffected. For example, if both `SPARK` andd `YARN` metric // sources are enabled, and overrides are provided for Spark metrics only, // all default YARN metrics will be collected. - repeated string metric_overrides = 2 [(google.api.field_behavior) = OPTIONAL]; + repeated string metric_overrides = 2 + [(google.api.field_behavior) = OPTIONAL]; } // Required. Metrics sources to enable. @@ -1045,11 +1155,12 @@ message CreateClusterRequest { // Required. The cluster to create. Cluster cluster = 2 [(google.api.field_behavior) = REQUIRED]; - // Optional. A unique ID used to identify the request. If the server receives two + // Optional. A unique ID used to identify the request. If the server receives + // two // [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s // with the same id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend - // is returned. + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. // // It is recommended to always set this value to a // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -1059,7 +1170,8 @@ message CreateClusterRequest { string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. Failure action when primary worker creation fails. - FailureAction action_on_failed_primary_workers = 5 [(google.api.field_behavior) = OPTIONAL]; + FailureAction action_on_failed_primary_workers = 5 + [(google.api.field_behavior) = OPTIONAL]; } // A request to update a cluster. 
@@ -1086,7 +1198,8 @@ message UpdateClusterRequest { // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). // // Only supported on Dataproc image versions 1.2 and higher. - google.protobuf.Duration graceful_decommission_timeout = 6 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration graceful_decommission_timeout = 6 + [(google.api.field_behavior) = OPTIONAL]; // Required. Specifies the path, relative to `Cluster`, of // the field to update. For example, to change the number of workers @@ -1139,14 +1252,15 @@ message UpdateClusterRequest { // // // - google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = REQUIRED]; + google.protobuf.FieldMask update_mask = 4 + [(google.api.field_behavior) = REQUIRED]; // Optional. A unique ID used to identify the request. If the server // receives two // [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s // with the same id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the - // backend is returned. + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. // // It is recommended to always set this value to a // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -1176,8 +1290,8 @@ message StopClusterRequest { // receives two // [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s // with the same id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the - // backend is returned. + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. 
// // Recommendation: Set this value to a // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -1207,8 +1321,8 @@ message StartClusterRequest { // receives two // [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s // with the same id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the - // backend is returned. + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. // // Recommendation: Set this value to a // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -1238,8 +1352,8 @@ message DeleteClusterRequest { // receives two // [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s // with the same id, then the second request will be ignored and the - // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the - // backend is returned. + // first [google.longrunning.Operation][google.longrunning.Operation] created + // and stored in the backend is returned. // // It is recommended to always set this value to a // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). 
diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/jobs.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/jobs.proto index 823a12cf8a8..e9dcf9cc6b8 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/jobs.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/jobs.proto @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -32,7 +32,8 @@ option java_package = "com.google.cloud.dataproc.v1"; // The JobController provides methods to manage jobs. service JobController { option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; // Submits a job to a cluster. rpc SubmitJob(SubmitJobRequest) returns (Job) { @@ -44,7 +45,8 @@ service JobController { } // Submits job to a cluster. - rpc SubmitJobAsOperation(SubmitJobRequest) returns (google.longrunning.Operation) { + rpc SubmitJobAsOperation(SubmitJobRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/jobs:submitAsOperation" body: "*" @@ -198,7 +200,7 @@ message HadoopJob { LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL]; } -// A Dataproc job for running [Apache Spark](http://spark.apache.org/) +// A Dataproc job for running [Apache Spark](https://spark.apache.org/) // applications on YARN. message SparkJob { // Required. The specification of the main method to call to drive the job. @@ -323,7 +325,8 @@ message HiveJob { // Optional. Mapping of query variable names to values (equivalent to the // Hive command: `SET name="value";`). 
- map script_variables = 4 [(google.api.field_behavior) = OPTIONAL]; + map script_variables = 4 + [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names and values, used to configure Hive. // Properties that conflict with values set by the Dataproc API may be @@ -338,7 +341,7 @@ message HiveJob { } // A Dataproc job for running [Apache Spark -// SQL](http://spark.apache.org/sql/) queries. +// SQL](https://spark.apache.org/sql/) queries. message SparkSqlJob { // Required. The sequence of Spark SQL queries to execute, specified as // either an HCFS file URI or as a list of queries. @@ -352,7 +355,8 @@ message SparkSqlJob { // Optional. Mapping of query variable names to values (equivalent to the // Spark SQL command: SET `name="value";`). - map script_variables = 3 [(google.api.field_behavior) = OPTIONAL]; + map script_variables = 3 + [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names to values, used to configure // Spark SQL's SparkConf. Properties that conflict with values set by the @@ -386,7 +390,8 @@ message PigJob { // Optional. Mapping of query variable names to values (equivalent to the Pig // command: `name=[value]`). - map script_variables = 4 [(google.api.field_behavior) = OPTIONAL]; + map script_variables = 4 + [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names to values, used to configure Pig. // Properties that conflict with values set by the Dataproc API may be @@ -480,8 +485,10 @@ message JobPlacement { // the job is submitted. string cluster_uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Optional. Cluster labels to identify a cluster where the job will be submitted. - map cluster_labels = 3 [(google.api.field_behavior) = OPTIONAL]; + // Optional. Cluster labels to identify a cluster where the job will be + // submitted. + map cluster_labels = 3 + [(google.api.field_behavior) = OPTIONAL]; } // Dataproc job status. 
@@ -560,7 +567,8 @@ message JobStatus { ]; // Output only. The time when this state was entered. - google.protobuf.Timestamp state_start_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp state_start_time = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Additional state information, which includes // status reported by the agent. @@ -569,8 +577,8 @@ message JobStatus { // Encapsulates the full scoping used to reference a job. message JobReference { - // Optional. The ID of the Google Cloud Platform project that the job belongs to. If - // specified, must match the request project ID. + // Optional. The ID of the Google Cloud Platform project that the job belongs + // to. If specified, must match the request project ID. string project_id = 1 [(google.api.field_behavior) = OPTIONAL]; // Optional. The job ID, which must be unique within the project. @@ -680,22 +688,26 @@ message Job { JobStatus status = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The previous job status. - repeated JobStatus status_history = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated JobStatus status_history = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The collection of YARN applications spun up by this job. // // **Beta** Feature: This report is available for testing purposes only. It // may be changed before final release. - repeated YarnApplication yarn_applications = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated YarnApplication yarn_applications = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. A URI pointing to the location of the stdout of the job's // driver program. - string driver_output_resource_uri = 17 [(google.api.field_behavior) = OUTPUT_ONLY]; + string driver_output_resource_uri = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. 
If present, the location of miscellaneous control files // which may be used as part of job setup and handling. If not present, // control files may be placed in the same location as `driver_output_uri`. - string driver_control_files_uri = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; + string driver_control_files_uri = 15 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. The labels to associate with this job. // Label **keys** must contain 1 to 63 characters, and must conform to @@ -714,11 +726,24 @@ message Job { // may be reused over time. string job_uuid = 22 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. Indicates whether the job is completed. If the value is `false`, - // the job is still in progress. If `true`, the job is completed, and + // Output only. Indicates whether the job is completed. If the value is + // `false`, the job is still in progress. If `true`, the job is completed, and // `status.state` field will indicate if it was successful, failed, // or cancelled. bool done = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Driver scheduling configuration. + DriverSchedulingConfig driver_scheduling_config = 27 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Driver scheduling configuration. +message DriverSchedulingConfig { + // Required. The amount of memory in MB the driver is requesting. + int32 memory_mb = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The number of vCPUs the driver is requesting. + int32 vcores = 2 [(google.api.field_behavior) = REQUIRED]; } // Job scheduling options. @@ -727,27 +752,26 @@ message JobScheduling { // a result of driver exiting with non-zero code before job is // reported failed. // - // A job may be reported as thrashing if driver exits with non-zero code - // 4 times within 10 minute window. + // A job may be reported as thrashing if the driver exits with a non-zero code + // four times within a 10-minute window. // // Maximum value is 10. 
// - // **Note:** Currently, this restartable job option is - // not supported in Dataproc - // [workflow - // template](https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template) - // jobs. + // **Note:** This restartable job option is not supported in Dataproc + // [workflow templates] + // (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). int32 max_failures_per_hour = 1 [(google.api.field_behavior) = OPTIONAL]; - // Optional. Maximum number of times in total a driver may be restarted as a result of - // driver exiting with non-zero code before job is reported failed. + // Optional. Maximum total number of times a driver may be restarted as a + // result of the driver exiting with a non-zero code. After the maximum number + // is reached, the job will be reported as failed. + // // Maximum value is 240. // // **Note:** Currently, this restartable job option is // not supported in Dataproc // [workflow - // template](https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template) - // jobs. + // templates](https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). int32 max_failures_total = 2 [(google.api.field_behavior) = OPTIONAL]; } @@ -790,7 +814,8 @@ message JobMetadata { string operation_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Job submission time. - google.protobuf.Timestamp start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp start_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // A request to get the resource representation for a job in a project. @@ -843,7 +868,8 @@ message ListJobsRequest { // (default = match ALL jobs). // // If `filter` is provided, `jobStateMatcher` will be ignored. 
- JobStateMatcher job_state_matcher = 5 [(google.api.field_behavior) = OPTIONAL]; + JobStateMatcher job_state_matcher = 5 + [(google.api.field_behavior) = OPTIONAL]; // Optional. A filter constraining the jobs to list. Filters are // case-sensitive and have the following syntax: @@ -883,7 +909,8 @@ message UpdateJobRequest { // labels, and the `PATCH` request body would specify the new // value. Note: Currently, labels is the only // field that can be updated. - google.protobuf.FieldMask update_mask = 5 [(google.api.field_behavior) = REQUIRED]; + google.protobuf.FieldMask update_mask = 5 + [(google.api.field_behavior) = REQUIRED]; } // A list of jobs in a project. diff --git a/owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/node_groups.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/node_groups.proto similarity index 100% rename from owl-bot-staging/google-cloud-dataproc/v1/protos/google/cloud/dataproc/v1/node_groups.proto rename to packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/node_groups.proto diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/operations.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/operations.proto index e12bd299a88..854d9eda6ab 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/operations.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/operations.proto @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -87,7 +87,8 @@ message ClusterOperationStatus { string details = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The time this state was entered. 
- google.protobuf.Timestamp state_start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp state_start_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // Metadata describing the operation. @@ -102,7 +103,8 @@ message ClusterOperationMetadata { ClusterOperationStatus status = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The previous operation status. - repeated ClusterOperationStatus status_history = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated ClusterOperationStatus status_history = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The operation type. string operation_type = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -116,3 +118,49 @@ message ClusterOperationMetadata { // Output only. Errors encountered during operation execution. repeated string warnings = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; } + +// Metadata describing the node group operation. +message NodeGroupOperationMetadata { + // Operation type for node group resources. + enum NodeGroupOperationType { + // Node group operation type is unknown. + NODE_GROUP_OPERATION_TYPE_UNSPECIFIED = 0; + + // Create node group operation type. + CREATE = 1; + + // Update node group operation type. + UPDATE = 2; + + // Delete node group operation type. + DELETE = 3; + + // Resize node group operation type. + RESIZE = 4; + } + + // Output only. Node group ID for the operation. + string node_group_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Cluster UUID associated with the node group operation. + string cluster_uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Current operation status. + ClusterOperationStatus status = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The previous operation status. + repeated ClusterOperationStatus status_history = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The operation type. 
+ NodeGroupOperationType operation_type = 5; + + // Output only. Short description of operation. + string description = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Labels associated with the operation. + map labels = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Errors encountered during operation execution. + repeated string warnings = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/packages/google-cloud-dataproc/protos/protos.d.ts b/packages/google-cloud-dataproc/protos/protos.d.ts index 2f8072e69ae..835027498b3 100644 --- a/packages/google-cloud-dataproc/protos/protos.d.ts +++ b/packages/google-cloud-dataproc/protos/protos.d.ts @@ -4587,6 +4587,9 @@ export namespace google { /** ClusterConfig dataprocMetricConfig */ dataprocMetricConfig?: (google.cloud.dataproc.v1.IDataprocMetricConfig|null); + + /** ClusterConfig auxiliaryNodeGroups */ + auxiliaryNodeGroups?: (google.cloud.dataproc.v1.IAuxiliaryNodeGroup[]|null); } /** Represents a ClusterConfig. */ @@ -4643,6 +4646,9 @@ export namespace google { /** ClusterConfig dataprocMetricConfig. */ public dataprocMetricConfig?: (google.cloud.dataproc.v1.IDataprocMetricConfig|null); + /** ClusterConfig auxiliaryNodeGroups. */ + public auxiliaryNodeGroups: google.cloud.dataproc.v1.IAuxiliaryNodeGroup[]; + /** * Creates a new ClusterConfig instance using the specified properties. * @param [properties] Properties to set @@ -6198,6 +6204,233 @@ export namespace google { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of an AuxiliaryNodeGroup. */ + interface IAuxiliaryNodeGroup { + + /** AuxiliaryNodeGroup nodeGroup */ + nodeGroup?: (google.cloud.dataproc.v1.INodeGroup|null); + + /** AuxiliaryNodeGroup nodeGroupId */ + nodeGroupId?: (string|null); + } + + /** Represents an AuxiliaryNodeGroup. */ + class AuxiliaryNodeGroup implements IAuxiliaryNodeGroup { + + /** + * Constructs a new AuxiliaryNodeGroup. 
+ * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1.IAuxiliaryNodeGroup); + + /** AuxiliaryNodeGroup nodeGroup. */ + public nodeGroup?: (google.cloud.dataproc.v1.INodeGroup|null); + + /** AuxiliaryNodeGroup nodeGroupId. */ + public nodeGroupId: string; + + /** + * Creates a new AuxiliaryNodeGroup instance using the specified properties. + * @param [properties] Properties to set + * @returns AuxiliaryNodeGroup instance + */ + public static create(properties?: google.cloud.dataproc.v1.IAuxiliaryNodeGroup): google.cloud.dataproc.v1.AuxiliaryNodeGroup; + + /** + * Encodes the specified AuxiliaryNodeGroup message. Does not implicitly {@link google.cloud.dataproc.v1.AuxiliaryNodeGroup.verify|verify} messages. + * @param message AuxiliaryNodeGroup message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1.IAuxiliaryNodeGroup, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified AuxiliaryNodeGroup message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.AuxiliaryNodeGroup.verify|verify} messages. + * @param message AuxiliaryNodeGroup message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1.IAuxiliaryNodeGroup, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an AuxiliaryNodeGroup message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns AuxiliaryNodeGroup + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1.AuxiliaryNodeGroup; + + /** + * Decodes an AuxiliaryNodeGroup message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns AuxiliaryNodeGroup + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1.AuxiliaryNodeGroup; + + /** + * Verifies an AuxiliaryNodeGroup message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an AuxiliaryNodeGroup message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns AuxiliaryNodeGroup + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1.AuxiliaryNodeGroup; + + /** + * Creates a plain object from an AuxiliaryNodeGroup message. Also converts values to other types if specified. + * @param message AuxiliaryNodeGroup + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1.AuxiliaryNodeGroup, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this AuxiliaryNodeGroup to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for AuxiliaryNodeGroup + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a NodeGroup. */ + interface INodeGroup { + + /** NodeGroup name */ + name?: (string|null); + + /** NodeGroup roles */ + roles?: (google.cloud.dataproc.v1.NodeGroup.Role[]|null); + + /** NodeGroup nodeGroupConfig */ + nodeGroupConfig?: (google.cloud.dataproc.v1.IInstanceGroupConfig|null); + + /** NodeGroup labels */ + labels?: ({ [k: string]: string }|null); + } + + /** Represents a NodeGroup. */ + class NodeGroup implements INodeGroup { + + /** + * Constructs a new NodeGroup. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1.INodeGroup); + + /** NodeGroup name. */ + public name: string; + + /** NodeGroup roles. */ + public roles: google.cloud.dataproc.v1.NodeGroup.Role[]; + + /** NodeGroup nodeGroupConfig. */ + public nodeGroupConfig?: (google.cloud.dataproc.v1.IInstanceGroupConfig|null); + + /** NodeGroup labels. */ + public labels: { [k: string]: string }; + + /** + * Creates a new NodeGroup instance using the specified properties. + * @param [properties] Properties to set + * @returns NodeGroup instance + */ + public static create(properties?: google.cloud.dataproc.v1.INodeGroup): google.cloud.dataproc.v1.NodeGroup; + + /** + * Encodes the specified NodeGroup message. Does not implicitly {@link google.cloud.dataproc.v1.NodeGroup.verify|verify} messages. + * @param message NodeGroup message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1.INodeGroup, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified NodeGroup message, length delimited. 
Does not implicitly {@link google.cloud.dataproc.v1.NodeGroup.verify|verify} messages. + * @param message NodeGroup message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1.INodeGroup, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a NodeGroup message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns NodeGroup + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1.NodeGroup; + + /** + * Decodes a NodeGroup message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns NodeGroup + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1.NodeGroup; + + /** + * Verifies a NodeGroup message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a NodeGroup message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns NodeGroup + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1.NodeGroup; + + /** + * Creates a plain object from a NodeGroup message. Also converts values to other types if specified. 
+ * @param message NodeGroup + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1.NodeGroup, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this NodeGroup to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for NodeGroup + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace NodeGroup { + + /** Role enum. */ + enum Role { + ROLE_UNSPECIFIED = 0, + DRIVER = 1 + } + } + /** Properties of a NodeInitializationAction. */ interface INodeInitializationAction { @@ -10743,6 +10976,9 @@ export namespace google { /** Job done */ done?: (boolean|null); + + /** Job driverSchedulingConfig */ + driverSchedulingConfig?: (google.cloud.dataproc.v1.IDriverSchedulingConfig|null); } /** Represents a Job. */ @@ -10811,6 +11047,9 @@ export namespace google { /** Job done. */ public done: boolean; + /** Job driverSchedulingConfig. */ + public driverSchedulingConfig?: (google.cloud.dataproc.v1.IDriverSchedulingConfig|null); + /** Job typeJob. */ public typeJob?: ("hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkRJob"|"sparkSqlJob"|"prestoJob"); @@ -10892,6 +11131,109 @@ export namespace google { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a DriverSchedulingConfig. */ + interface IDriverSchedulingConfig { + + /** DriverSchedulingConfig memoryMb */ + memoryMb?: (number|null); + + /** DriverSchedulingConfig vcores */ + vcores?: (number|null); + } + + /** Represents a DriverSchedulingConfig. */ + class DriverSchedulingConfig implements IDriverSchedulingConfig { + + /** + * Constructs a new DriverSchedulingConfig. 
+ * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1.IDriverSchedulingConfig); + + /** DriverSchedulingConfig memoryMb. */ + public memoryMb: number; + + /** DriverSchedulingConfig vcores. */ + public vcores: number; + + /** + * Creates a new DriverSchedulingConfig instance using the specified properties. + * @param [properties] Properties to set + * @returns DriverSchedulingConfig instance + */ + public static create(properties?: google.cloud.dataproc.v1.IDriverSchedulingConfig): google.cloud.dataproc.v1.DriverSchedulingConfig; + + /** + * Encodes the specified DriverSchedulingConfig message. Does not implicitly {@link google.cloud.dataproc.v1.DriverSchedulingConfig.verify|verify} messages. + * @param message DriverSchedulingConfig message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1.IDriverSchedulingConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DriverSchedulingConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DriverSchedulingConfig.verify|verify} messages. + * @param message DriverSchedulingConfig message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1.IDriverSchedulingConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DriverSchedulingConfig message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DriverSchedulingConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1.DriverSchedulingConfig; + + /** + * Decodes a DriverSchedulingConfig message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DriverSchedulingConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1.DriverSchedulingConfig; + + /** + * Verifies a DriverSchedulingConfig message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DriverSchedulingConfig message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns DriverSchedulingConfig + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1.DriverSchedulingConfig; + + /** + * Creates a plain object from a DriverSchedulingConfig message. Also converts values to other types if specified. + * @param message DriverSchedulingConfig + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1.DriverSchedulingConfig, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DriverSchedulingConfig to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DriverSchedulingConfig + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a JobScheduling. */ interface IJobScheduling { @@ -11919,29 +12261,443 @@ export namespace google { public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BatchOperationMetadata. */ - interface IBatchOperationMetadata { + /** Represents a NodeGroupController */ + class NodeGroupController extends $protobuf.rpc.Service { - /** BatchOperationMetadata batch */ - batch?: (string|null); + /** + * Constructs a new NodeGroupController service. + * @param rpcImpl RPC implementation + * @param [requestDelimited=false] Whether requests are length-delimited + * @param [responseDelimited=false] Whether responses are length-delimited + */ + constructor(rpcImpl: $protobuf.RPCImpl, requestDelimited?: boolean, responseDelimited?: boolean); - /** BatchOperationMetadata batchUuid */ - batchUuid?: (string|null); + /** + * Creates new NodeGroupController service using the specified rpc implementation. + * @param rpcImpl RPC implementation + * @param [requestDelimited=false] Whether requests are length-delimited + * @param [responseDelimited=false] Whether responses are length-delimited + * @returns RPC service. Useful where requests and/or responses are streamed. + */ + public static create(rpcImpl: $protobuf.RPCImpl, requestDelimited?: boolean, responseDelimited?: boolean): NodeGroupController; - /** BatchOperationMetadata createTime */ - createTime?: (google.protobuf.ITimestamp|null); + /** + * Calls CreateNodeGroup. 
+ * @param request CreateNodeGroupRequest message or plain object + * @param callback Node-style callback called with the error, if any, and Operation + */ + public createNodeGroup(request: google.cloud.dataproc.v1.ICreateNodeGroupRequest, callback: google.cloud.dataproc.v1.NodeGroupController.CreateNodeGroupCallback): void; - /** BatchOperationMetadata doneTime */ - doneTime?: (google.protobuf.ITimestamp|null); + /** + * Calls CreateNodeGroup. + * @param request CreateNodeGroupRequest message or plain object + * @returns Promise + */ + public createNodeGroup(request: google.cloud.dataproc.v1.ICreateNodeGroupRequest): Promise; - /** BatchOperationMetadata operationType */ - operationType?: (google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType|keyof typeof google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType|null); + /** + * Calls ResizeNodeGroup. + * @param request ResizeNodeGroupRequest message or plain object + * @param callback Node-style callback called with the error, if any, and Operation + */ + public resizeNodeGroup(request: google.cloud.dataproc.v1.IResizeNodeGroupRequest, callback: google.cloud.dataproc.v1.NodeGroupController.ResizeNodeGroupCallback): void; - /** BatchOperationMetadata description */ - description?: (string|null); + /** + * Calls ResizeNodeGroup. + * @param request ResizeNodeGroupRequest message or plain object + * @returns Promise + */ + public resizeNodeGroup(request: google.cloud.dataproc.v1.IResizeNodeGroupRequest): Promise; - /** BatchOperationMetadata labels */ - labels?: ({ [k: string]: string }|null); + /** + * Calls GetNodeGroup. + * @param request GetNodeGroupRequest message or plain object + * @param callback Node-style callback called with the error, if any, and NodeGroup + */ + public getNodeGroup(request: google.cloud.dataproc.v1.IGetNodeGroupRequest, callback: google.cloud.dataproc.v1.NodeGroupController.GetNodeGroupCallback): void; + + /** + * Calls GetNodeGroup. 
+ * @param request GetNodeGroupRequest message or plain object + * @returns Promise + */ + public getNodeGroup(request: google.cloud.dataproc.v1.IGetNodeGroupRequest): Promise; + } + + namespace NodeGroupController { + + /** + * Callback as used by {@link google.cloud.dataproc.v1.NodeGroupController|createNodeGroup}. + * @param error Error, if any + * @param [response] Operation + */ + type CreateNodeGroupCallback = (error: (Error|null), response?: google.longrunning.Operation) => void; + + /** + * Callback as used by {@link google.cloud.dataproc.v1.NodeGroupController|resizeNodeGroup}. + * @param error Error, if any + * @param [response] Operation + */ + type ResizeNodeGroupCallback = (error: (Error|null), response?: google.longrunning.Operation) => void; + + /** + * Callback as used by {@link google.cloud.dataproc.v1.NodeGroupController|getNodeGroup}. + * @param error Error, if any + * @param [response] NodeGroup + */ + type GetNodeGroupCallback = (error: (Error|null), response?: google.cloud.dataproc.v1.NodeGroup) => void; + } + + /** Properties of a CreateNodeGroupRequest. */ + interface ICreateNodeGroupRequest { + + /** CreateNodeGroupRequest parent */ + parent?: (string|null); + + /** CreateNodeGroupRequest nodeGroup */ + nodeGroup?: (google.cloud.dataproc.v1.INodeGroup|null); + + /** CreateNodeGroupRequest nodeGroupId */ + nodeGroupId?: (string|null); + + /** CreateNodeGroupRequest requestId */ + requestId?: (string|null); + } + + /** Represents a CreateNodeGroupRequest. */ + class CreateNodeGroupRequest implements ICreateNodeGroupRequest { + + /** + * Constructs a new CreateNodeGroupRequest. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1.ICreateNodeGroupRequest); + + /** CreateNodeGroupRequest parent. */ + public parent: string; + + /** CreateNodeGroupRequest nodeGroup. */ + public nodeGroup?: (google.cloud.dataproc.v1.INodeGroup|null); + + /** CreateNodeGroupRequest nodeGroupId. 
*/ + public nodeGroupId: string; + + /** CreateNodeGroupRequest requestId. */ + public requestId: string; + + /** + * Creates a new CreateNodeGroupRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns CreateNodeGroupRequest instance + */ + public static create(properties?: google.cloud.dataproc.v1.ICreateNodeGroupRequest): google.cloud.dataproc.v1.CreateNodeGroupRequest; + + /** + * Encodes the specified CreateNodeGroupRequest message. Does not implicitly {@link google.cloud.dataproc.v1.CreateNodeGroupRequest.verify|verify} messages. + * @param message CreateNodeGroupRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1.ICreateNodeGroupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CreateNodeGroupRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.CreateNodeGroupRequest.verify|verify} messages. + * @param message CreateNodeGroupRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1.ICreateNodeGroupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CreateNodeGroupRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CreateNodeGroupRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1.CreateNodeGroupRequest; + + /** + * Decodes a CreateNodeGroupRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns CreateNodeGroupRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1.CreateNodeGroupRequest; + + /** + * Verifies a CreateNodeGroupRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CreateNodeGroupRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CreateNodeGroupRequest + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1.CreateNodeGroupRequest; + + /** + * Creates a plain object from a CreateNodeGroupRequest message. Also converts values to other types if specified. + * @param message CreateNodeGroupRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1.CreateNodeGroupRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CreateNodeGroupRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CreateNodeGroupRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ResizeNodeGroupRequest. 
*/ + interface IResizeNodeGroupRequest { + + /** ResizeNodeGroupRequest name */ + name?: (string|null); + + /** ResizeNodeGroupRequest size */ + size?: (number|null); + + /** ResizeNodeGroupRequest requestId */ + requestId?: (string|null); + + /** ResizeNodeGroupRequest gracefulDecommissionTimeout */ + gracefulDecommissionTimeout?: (google.protobuf.IDuration|null); + } + + /** Represents a ResizeNodeGroupRequest. */ + class ResizeNodeGroupRequest implements IResizeNodeGroupRequest { + + /** + * Constructs a new ResizeNodeGroupRequest. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1.IResizeNodeGroupRequest); + + /** ResizeNodeGroupRequest name. */ + public name: string; + + /** ResizeNodeGroupRequest size. */ + public size: number; + + /** ResizeNodeGroupRequest requestId. */ + public requestId: string; + + /** ResizeNodeGroupRequest gracefulDecommissionTimeout. */ + public gracefulDecommissionTimeout?: (google.protobuf.IDuration|null); + + /** + * Creates a new ResizeNodeGroupRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ResizeNodeGroupRequest instance + */ + public static create(properties?: google.cloud.dataproc.v1.IResizeNodeGroupRequest): google.cloud.dataproc.v1.ResizeNodeGroupRequest; + + /** + * Encodes the specified ResizeNodeGroupRequest message. Does not implicitly {@link google.cloud.dataproc.v1.ResizeNodeGroupRequest.verify|verify} messages. + * @param message ResizeNodeGroupRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1.IResizeNodeGroupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ResizeNodeGroupRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ResizeNodeGroupRequest.verify|verify} messages. 
+ * @param message ResizeNodeGroupRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1.IResizeNodeGroupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ResizeNodeGroupRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ResizeNodeGroupRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1.ResizeNodeGroupRequest; + + /** + * Decodes a ResizeNodeGroupRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ResizeNodeGroupRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1.ResizeNodeGroupRequest; + + /** + * Verifies a ResizeNodeGroupRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ResizeNodeGroupRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ResizeNodeGroupRequest + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1.ResizeNodeGroupRequest; + + /** + * Creates a plain object from a ResizeNodeGroupRequest message. Also converts values to other types if specified. 
+ * @param message ResizeNodeGroupRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1.ResizeNodeGroupRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ResizeNodeGroupRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ResizeNodeGroupRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a GetNodeGroupRequest. */ + interface IGetNodeGroupRequest { + + /** GetNodeGroupRequest name */ + name?: (string|null); + } + + /** Represents a GetNodeGroupRequest. */ + class GetNodeGroupRequest implements IGetNodeGroupRequest { + + /** + * Constructs a new GetNodeGroupRequest. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1.IGetNodeGroupRequest); + + /** GetNodeGroupRequest name. */ + public name: string; + + /** + * Creates a new GetNodeGroupRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns GetNodeGroupRequest instance + */ + public static create(properties?: google.cloud.dataproc.v1.IGetNodeGroupRequest): google.cloud.dataproc.v1.GetNodeGroupRequest; + + /** + * Encodes the specified GetNodeGroupRequest message. Does not implicitly {@link google.cloud.dataproc.v1.GetNodeGroupRequest.verify|verify} messages. + * @param message GetNodeGroupRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1.IGetNodeGroupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified GetNodeGroupRequest message, length delimited. 
Does not implicitly {@link google.cloud.dataproc.v1.GetNodeGroupRequest.verify|verify} messages. + * @param message GetNodeGroupRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1.IGetNodeGroupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a GetNodeGroupRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns GetNodeGroupRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1.GetNodeGroupRequest; + + /** + * Decodes a GetNodeGroupRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns GetNodeGroupRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1.GetNodeGroupRequest; + + /** + * Verifies a GetNodeGroupRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a GetNodeGroupRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns GetNodeGroupRequest + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1.GetNodeGroupRequest; + + /** + * Creates a plain object from a GetNodeGroupRequest message. Also converts values to other types if specified. 
+ * @param message GetNodeGroupRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1.GetNodeGroupRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this GetNodeGroupRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for GetNodeGroupRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a BatchOperationMetadata. */ + interface IBatchOperationMetadata { + + /** BatchOperationMetadata batch */ + batch?: (string|null); + + /** BatchOperationMetadata batchUuid */ + batchUuid?: (string|null); + + /** BatchOperationMetadata createTime */ + createTime?: (google.protobuf.ITimestamp|null); + + /** BatchOperationMetadata doneTime */ + doneTime?: (google.protobuf.ITimestamp|null); + + /** BatchOperationMetadata operationType */ + operationType?: (google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType|keyof typeof google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType|null); + + /** BatchOperationMetadata description */ + description?: (string|null); + + /** BatchOperationMetadata labels */ + labels?: ({ [k: string]: string }|null); /** BatchOperationMetadata warnings */ warnings?: (string[]|null); @@ -12332,6 +13088,157 @@ export namespace google { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a NodeGroupOperationMetadata. 
*/ + interface INodeGroupOperationMetadata { + + /** NodeGroupOperationMetadata nodeGroupId */ + nodeGroupId?: (string|null); + + /** NodeGroupOperationMetadata clusterUuid */ + clusterUuid?: (string|null); + + /** NodeGroupOperationMetadata status */ + status?: (google.cloud.dataproc.v1.IClusterOperationStatus|null); + + /** NodeGroupOperationMetadata statusHistory */ + statusHistory?: (google.cloud.dataproc.v1.IClusterOperationStatus[]|null); + + /** NodeGroupOperationMetadata operationType */ + operationType?: (google.cloud.dataproc.v1.NodeGroupOperationMetadata.NodeGroupOperationType|keyof typeof google.cloud.dataproc.v1.NodeGroupOperationMetadata.NodeGroupOperationType|null); + + /** NodeGroupOperationMetadata description */ + description?: (string|null); + + /** NodeGroupOperationMetadata labels */ + labels?: ({ [k: string]: string }|null); + + /** NodeGroupOperationMetadata warnings */ + warnings?: (string[]|null); + } + + /** Represents a NodeGroupOperationMetadata. */ + class NodeGroupOperationMetadata implements INodeGroupOperationMetadata { + + /** + * Constructs a new NodeGroupOperationMetadata. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1.INodeGroupOperationMetadata); + + /** NodeGroupOperationMetadata nodeGroupId. */ + public nodeGroupId: string; + + /** NodeGroupOperationMetadata clusterUuid. */ + public clusterUuid: string; + + /** NodeGroupOperationMetadata status. */ + public status?: (google.cloud.dataproc.v1.IClusterOperationStatus|null); + + /** NodeGroupOperationMetadata statusHistory. */ + public statusHistory: google.cloud.dataproc.v1.IClusterOperationStatus[]; + + /** NodeGroupOperationMetadata operationType. */ + public operationType: (google.cloud.dataproc.v1.NodeGroupOperationMetadata.NodeGroupOperationType|keyof typeof google.cloud.dataproc.v1.NodeGroupOperationMetadata.NodeGroupOperationType); + + /** NodeGroupOperationMetadata description. 
*/ + public description: string; + + /** NodeGroupOperationMetadata labels. */ + public labels: { [k: string]: string }; + + /** NodeGroupOperationMetadata warnings. */ + public warnings: string[]; + + /** + * Creates a new NodeGroupOperationMetadata instance using the specified properties. + * @param [properties] Properties to set + * @returns NodeGroupOperationMetadata instance + */ + public static create(properties?: google.cloud.dataproc.v1.INodeGroupOperationMetadata): google.cloud.dataproc.v1.NodeGroupOperationMetadata; + + /** + * Encodes the specified NodeGroupOperationMetadata message. Does not implicitly {@link google.cloud.dataproc.v1.NodeGroupOperationMetadata.verify|verify} messages. + * @param message NodeGroupOperationMetadata message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1.INodeGroupOperationMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified NodeGroupOperationMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.NodeGroupOperationMetadata.verify|verify} messages. + * @param message NodeGroupOperationMetadata message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1.INodeGroupOperationMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a NodeGroupOperationMetadata message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns NodeGroupOperationMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1.NodeGroupOperationMetadata; + + /** + * Decodes a NodeGroupOperationMetadata message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns NodeGroupOperationMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1.NodeGroupOperationMetadata; + + /** + * Verifies a NodeGroupOperationMetadata message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a NodeGroupOperationMetadata message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns NodeGroupOperationMetadata + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1.NodeGroupOperationMetadata; + + /** + * Creates a plain object from a NodeGroupOperationMetadata message. Also converts values to other types if specified. + * @param message NodeGroupOperationMetadata + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1.NodeGroupOperationMetadata, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this NodeGroupOperationMetadata to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for NodeGroupOperationMetadata + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace NodeGroupOperationMetadata { + + /** NodeGroupOperationType enum. */ + enum NodeGroupOperationType { + NODE_GROUP_OPERATION_TYPE_UNSPECIFIED = 0, + CREATE = 1, + UPDATE = 2, + DELETE = 3, + RESIZE = 4 + } + } + /** Represents a WorkflowTemplateService */ class WorkflowTemplateService extends $protobuf.rpc.Service { diff --git a/packages/google-cloud-dataproc/protos/protos.js b/packages/google-cloud-dataproc/protos/protos.js index 72a580057f0..3ac51b048a5 100644 --- a/packages/google-cloud-dataproc/protos/protos.js +++ b/packages/google-cloud-dataproc/protos/protos.js @@ -11312,6 +11312,7 @@ * @property {google.cloud.dataproc.v1.IEndpointConfig|null} [endpointConfig] ClusterConfig endpointConfig * @property {google.cloud.dataproc.v1.IMetastoreConfig|null} [metastoreConfig] ClusterConfig metastoreConfig * @property {google.cloud.dataproc.v1.IDataprocMetricConfig|null} [dataprocMetricConfig] ClusterConfig dataprocMetricConfig + * @property {Array.|null} [auxiliaryNodeGroups] ClusterConfig auxiliaryNodeGroups */ /** @@ -11324,6 +11325,7 @@ */ function ClusterConfig(properties) { this.initializationActions = []; + this.auxiliaryNodeGroups = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -11450,6 +11452,14 @@ */ ClusterConfig.prototype.dataprocMetricConfig = null; + /** + * ClusterConfig auxiliaryNodeGroups. + * @member {Array.} auxiliaryNodeGroups + * @memberof google.cloud.dataproc.v1.ClusterConfig + * @instance + */ + ClusterConfig.prototype.auxiliaryNodeGroups = $util.emptyArray; + /** * Creates a new ClusterConfig instance using the specified properties. 
* @function create @@ -11505,6 +11515,9 @@ $root.google.cloud.dataproc.v1.MetastoreConfig.encode(message.metastoreConfig, writer.uint32(/* id 20, wireType 2 =*/162).fork()).ldelim(); if (message.dataprocMetricConfig != null && Object.hasOwnProperty.call(message, "dataprocMetricConfig")) $root.google.cloud.dataproc.v1.DataprocMetricConfig.encode(message.dataprocMetricConfig, writer.uint32(/* id 23, wireType 2 =*/186).fork()).ldelim(); + if (message.auxiliaryNodeGroups != null && message.auxiliaryNodeGroups.length) + for (var i = 0; i < message.auxiliaryNodeGroups.length; ++i) + $root.google.cloud.dataproc.v1.AuxiliaryNodeGroup.encode(message.auxiliaryNodeGroups[i], writer.uint32(/* id 25, wireType 2 =*/202).fork()).ldelim(); return writer; }; @@ -11601,6 +11614,12 @@ message.dataprocMetricConfig = $root.google.cloud.dataproc.v1.DataprocMetricConfig.decode(reader, reader.uint32()); break; } + case 25: { + if (!(message.auxiliaryNodeGroups && message.auxiliaryNodeGroups.length)) + message.auxiliaryNodeGroups = []; + message.auxiliaryNodeGroups.push($root.google.cloud.dataproc.v1.AuxiliaryNodeGroup.decode(reader, reader.uint32())); + break; + } default: reader.skipType(tag & 7); break; @@ -11711,6 +11730,15 @@ if (error) return "dataprocMetricConfig." + error; } + if (message.auxiliaryNodeGroups != null && message.hasOwnProperty("auxiliaryNodeGroups")) { + if (!Array.isArray(message.auxiliaryNodeGroups)) + return "auxiliaryNodeGroups: array expected"; + for (var i = 0; i < message.auxiliaryNodeGroups.length; ++i) { + var error = $root.google.cloud.dataproc.v1.AuxiliaryNodeGroup.verify(message.auxiliaryNodeGroups[i]); + if (error) + return "auxiliaryNodeGroups." 
+ error; + } + } return null; }; @@ -11800,6 +11828,16 @@ throw TypeError(".google.cloud.dataproc.v1.ClusterConfig.dataprocMetricConfig: object expected"); message.dataprocMetricConfig = $root.google.cloud.dataproc.v1.DataprocMetricConfig.fromObject(object.dataprocMetricConfig); } + if (object.auxiliaryNodeGroups) { + if (!Array.isArray(object.auxiliaryNodeGroups)) + throw TypeError(".google.cloud.dataproc.v1.ClusterConfig.auxiliaryNodeGroups: array expected"); + message.auxiliaryNodeGroups = []; + for (var i = 0; i < object.auxiliaryNodeGroups.length; ++i) { + if (typeof object.auxiliaryNodeGroups[i] !== "object") + throw TypeError(".google.cloud.dataproc.v1.ClusterConfig.auxiliaryNodeGroups: object expected"); + message.auxiliaryNodeGroups[i] = $root.google.cloud.dataproc.v1.AuxiliaryNodeGroup.fromObject(object.auxiliaryNodeGroups[i]); + } + } return message; }; @@ -11816,8 +11854,10 @@ if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) + if (options.arrays || options.defaults) { object.initializationActions = []; + object.auxiliaryNodeGroups = []; + } if (options.defaults) { object.configBucket = ""; object.tempBucket = ""; @@ -11867,6 +11907,11 @@ object.metastoreConfig = $root.google.cloud.dataproc.v1.MetastoreConfig.toObject(message.metastoreConfig, options); if (message.dataprocMetricConfig != null && message.hasOwnProperty("dataprocMetricConfig")) object.dataprocMetricConfig = $root.google.cloud.dataproc.v1.DataprocMetricConfig.toObject(message.dataprocMetricConfig, options); + if (message.auxiliaryNodeGroups && message.auxiliaryNodeGroups.length) { + object.auxiliaryNodeGroups = []; + for (var j = 0; j < message.auxiliaryNodeGroups.length; ++j) + object.auxiliaryNodeGroups[j] = $root.google.cloud.dataproc.v1.AuxiliaryNodeGroup.toObject(message.auxiliaryNodeGroups[j], options); + } return object; }; @@ -15584,25 +15629,25 @@ return DiskConfig; })(); - v1.NodeInitializationAction = (function() { + v1.AuxiliaryNodeGroup = 
(function() { /** - * Properties of a NodeInitializationAction. + * Properties of an AuxiliaryNodeGroup. * @memberof google.cloud.dataproc.v1 - * @interface INodeInitializationAction - * @property {string|null} [executableFile] NodeInitializationAction executableFile - * @property {google.protobuf.IDuration|null} [executionTimeout] NodeInitializationAction executionTimeout + * @interface IAuxiliaryNodeGroup + * @property {google.cloud.dataproc.v1.INodeGroup|null} [nodeGroup] AuxiliaryNodeGroup nodeGroup + * @property {string|null} [nodeGroupId] AuxiliaryNodeGroup nodeGroupId */ /** - * Constructs a new NodeInitializationAction. + * Constructs a new AuxiliaryNodeGroup. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a NodeInitializationAction. - * @implements INodeInitializationAction + * @classdesc Represents an AuxiliaryNodeGroup. + * @implements IAuxiliaryNodeGroup * @constructor - * @param {google.cloud.dataproc.v1.INodeInitializationAction=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IAuxiliaryNodeGroup=} [properties] Properties to set */ - function NodeInitializationAction(properties) { + function AuxiliaryNodeGroup(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -15610,89 +15655,89 @@ } /** - * NodeInitializationAction executableFile. - * @member {string} executableFile - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * AuxiliaryNodeGroup nodeGroup. + * @member {google.cloud.dataproc.v1.INodeGroup|null|undefined} nodeGroup + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @instance */ - NodeInitializationAction.prototype.executableFile = ""; + AuxiliaryNodeGroup.prototype.nodeGroup = null; /** - * NodeInitializationAction executionTimeout. 
- * @member {google.protobuf.IDuration|null|undefined} executionTimeout - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * AuxiliaryNodeGroup nodeGroupId. + * @member {string} nodeGroupId + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @instance */ - NodeInitializationAction.prototype.executionTimeout = null; + AuxiliaryNodeGroup.prototype.nodeGroupId = ""; /** - * Creates a new NodeInitializationAction instance using the specified properties. + * Creates a new AuxiliaryNodeGroup instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @static - * @param {google.cloud.dataproc.v1.INodeInitializationAction=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.NodeInitializationAction} NodeInitializationAction instance + * @param {google.cloud.dataproc.v1.IAuxiliaryNodeGroup=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.AuxiliaryNodeGroup} AuxiliaryNodeGroup instance */ - NodeInitializationAction.create = function create(properties) { - return new NodeInitializationAction(properties); + AuxiliaryNodeGroup.create = function create(properties) { + return new AuxiliaryNodeGroup(properties); }; /** - * Encodes the specified NodeInitializationAction message. Does not implicitly {@link google.cloud.dataproc.v1.NodeInitializationAction.verify|verify} messages. + * Encodes the specified AuxiliaryNodeGroup message. Does not implicitly {@link google.cloud.dataproc.v1.AuxiliaryNodeGroup.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @static - * @param {google.cloud.dataproc.v1.INodeInitializationAction} message NodeInitializationAction message or plain object to encode + * @param {google.cloud.dataproc.v1.IAuxiliaryNodeGroup} message AuxiliaryNodeGroup message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - NodeInitializationAction.encode = function encode(message, writer) { + AuxiliaryNodeGroup.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.executableFile != null && Object.hasOwnProperty.call(message, "executableFile")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.executableFile); - if (message.executionTimeout != null && Object.hasOwnProperty.call(message, "executionTimeout")) - $root.google.protobuf.Duration.encode(message.executionTimeout, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.nodeGroup != null && Object.hasOwnProperty.call(message, "nodeGroup")) + $root.google.cloud.dataproc.v1.NodeGroup.encode(message.nodeGroup, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.nodeGroupId != null && Object.hasOwnProperty.call(message, "nodeGroupId")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.nodeGroupId); return writer; }; /** - * Encodes the specified NodeInitializationAction message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.NodeInitializationAction.verify|verify} messages. + * Encodes the specified AuxiliaryNodeGroup message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.AuxiliaryNodeGroup.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @static - * @param {google.cloud.dataproc.v1.INodeInitializationAction} message NodeInitializationAction message or plain object to encode + * @param {google.cloud.dataproc.v1.IAuxiliaryNodeGroup} message AuxiliaryNodeGroup message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - NodeInitializationAction.encodeDelimited = function encodeDelimited(message, writer) { + AuxiliaryNodeGroup.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a NodeInitializationAction message from the specified reader or buffer. + * Decodes an AuxiliaryNodeGroup message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.NodeInitializationAction} NodeInitializationAction + * @returns {google.cloud.dataproc.v1.AuxiliaryNodeGroup} AuxiliaryNodeGroup * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - NodeInitializationAction.decode = function decode(reader, length) { + AuxiliaryNodeGroup.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.NodeInitializationAction(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.AuxiliaryNodeGroup(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.executableFile = reader.string(); + message.nodeGroup = $root.google.cloud.dataproc.v1.NodeGroup.decode(reader, reader.uint32()); break; } case 2: { - message.executionTimeout = $root.google.protobuf.Duration.decode(reader, reader.uint32()); + message.nodeGroupId = reader.string(); break; } default: @@ -15704,139 +15749,141 @@ }; /** - * Decodes a NodeInitializationAction message from the specified reader or buffer, length delimited. + * Decodes an AuxiliaryNodeGroup message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.NodeInitializationAction} NodeInitializationAction + * @returns {google.cloud.dataproc.v1.AuxiliaryNodeGroup} AuxiliaryNodeGroup * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - NodeInitializationAction.decodeDelimited = function decodeDelimited(reader) { + AuxiliaryNodeGroup.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a NodeInitializationAction message. + * Verifies an AuxiliaryNodeGroup message. 
* @function verify - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - NodeInitializationAction.verify = function verify(message) { + AuxiliaryNodeGroup.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.executableFile != null && message.hasOwnProperty("executableFile")) - if (!$util.isString(message.executableFile)) - return "executableFile: string expected"; - if (message.executionTimeout != null && message.hasOwnProperty("executionTimeout")) { - var error = $root.google.protobuf.Duration.verify(message.executionTimeout); + if (message.nodeGroup != null && message.hasOwnProperty("nodeGroup")) { + var error = $root.google.cloud.dataproc.v1.NodeGroup.verify(message.nodeGroup); if (error) - return "executionTimeout." + error; + return "nodeGroup." + error; } + if (message.nodeGroupId != null && message.hasOwnProperty("nodeGroupId")) + if (!$util.isString(message.nodeGroupId)) + return "nodeGroupId: string expected"; return null; }; /** - * Creates a NodeInitializationAction message from a plain object. Also converts values to their respective internal types. + * Creates an AuxiliaryNodeGroup message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.NodeInitializationAction} NodeInitializationAction + * @returns {google.cloud.dataproc.v1.AuxiliaryNodeGroup} AuxiliaryNodeGroup */ - NodeInitializationAction.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.NodeInitializationAction) + AuxiliaryNodeGroup.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.AuxiliaryNodeGroup) return object; - var message = new $root.google.cloud.dataproc.v1.NodeInitializationAction(); - if (object.executableFile != null) - message.executableFile = String(object.executableFile); - if (object.executionTimeout != null) { - if (typeof object.executionTimeout !== "object") - throw TypeError(".google.cloud.dataproc.v1.NodeInitializationAction.executionTimeout: object expected"); - message.executionTimeout = $root.google.protobuf.Duration.fromObject(object.executionTimeout); - } + var message = new $root.google.cloud.dataproc.v1.AuxiliaryNodeGroup(); + if (object.nodeGroup != null) { + if (typeof object.nodeGroup !== "object") + throw TypeError(".google.cloud.dataproc.v1.AuxiliaryNodeGroup.nodeGroup: object expected"); + message.nodeGroup = $root.google.cloud.dataproc.v1.NodeGroup.fromObject(object.nodeGroup); + } + if (object.nodeGroupId != null) + message.nodeGroupId = String(object.nodeGroupId); return message; }; /** - * Creates a plain object from a NodeInitializationAction message. Also converts values to other types if specified. + * Creates a plain object from an AuxiliaryNodeGroup message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @static - * @param {google.cloud.dataproc.v1.NodeInitializationAction} message NodeInitializationAction + * @param {google.cloud.dataproc.v1.AuxiliaryNodeGroup} message AuxiliaryNodeGroup * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - NodeInitializationAction.toObject = function toObject(message, options) { + AuxiliaryNodeGroup.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { - object.executableFile = ""; - object.executionTimeout = null; + object.nodeGroup = null; + object.nodeGroupId = ""; } - if (message.executableFile != null && message.hasOwnProperty("executableFile")) - object.executableFile = message.executableFile; - if (message.executionTimeout != null && message.hasOwnProperty("executionTimeout")) - object.executionTimeout = $root.google.protobuf.Duration.toObject(message.executionTimeout, options); + if (message.nodeGroup != null && message.hasOwnProperty("nodeGroup")) + object.nodeGroup = $root.google.cloud.dataproc.v1.NodeGroup.toObject(message.nodeGroup, options); + if (message.nodeGroupId != null && message.hasOwnProperty("nodeGroupId")) + object.nodeGroupId = message.nodeGroupId; return object; }; /** - * Converts this NodeInitializationAction to JSON. + * Converts this AuxiliaryNodeGroup to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @instance * @returns {Object.} JSON object */ - NodeInitializationAction.prototype.toJSON = function toJSON() { + AuxiliaryNodeGroup.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for NodeInitializationAction + * Gets the default type url for AuxiliaryNodeGroup * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.NodeInitializationAction + * @memberof google.cloud.dataproc.v1.AuxiliaryNodeGroup * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - NodeInitializationAction.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + AuxiliaryNodeGroup.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.NodeInitializationAction"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.AuxiliaryNodeGroup"; }; - return NodeInitializationAction; + return AuxiliaryNodeGroup; })(); - v1.ClusterStatus = (function() { + v1.NodeGroup = (function() { /** - * Properties of a ClusterStatus. + * Properties of a NodeGroup. 
* @memberof google.cloud.dataproc.v1 - * @interface IClusterStatus - * @property {google.cloud.dataproc.v1.ClusterStatus.State|null} [state] ClusterStatus state - * @property {string|null} [detail] ClusterStatus detail - * @property {google.protobuf.ITimestamp|null} [stateStartTime] ClusterStatus stateStartTime - * @property {google.cloud.dataproc.v1.ClusterStatus.Substate|null} [substate] ClusterStatus substate + * @interface INodeGroup + * @property {string|null} [name] NodeGroup name + * @property {Array.|null} [roles] NodeGroup roles + * @property {google.cloud.dataproc.v1.IInstanceGroupConfig|null} [nodeGroupConfig] NodeGroup nodeGroupConfig + * @property {Object.|null} [labels] NodeGroup labels */ /** - * Constructs a new ClusterStatus. + * Constructs a new NodeGroup. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a ClusterStatus. - * @implements IClusterStatus + * @classdesc Represents a NodeGroup. + * @implements INodeGroup * @constructor - * @param {google.cloud.dataproc.v1.IClusterStatus=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.INodeGroup=} [properties] Properties to set */ - function ClusterStatus(properties) { + function NodeGroup(properties) { + this.roles = []; + this.labels = {}; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -15844,117 +15891,148 @@ } /** - * ClusterStatus state. - * @member {google.cloud.dataproc.v1.ClusterStatus.State} state - * @memberof google.cloud.dataproc.v1.ClusterStatus + * NodeGroup name. + * @member {string} name + * @memberof google.cloud.dataproc.v1.NodeGroup * @instance */ - ClusterStatus.prototype.state = 0; + NodeGroup.prototype.name = ""; /** - * ClusterStatus detail. - * @member {string} detail - * @memberof google.cloud.dataproc.v1.ClusterStatus + * NodeGroup roles. 
+ * @member {Array.} roles + * @memberof google.cloud.dataproc.v1.NodeGroup * @instance */ - ClusterStatus.prototype.detail = ""; + NodeGroup.prototype.roles = $util.emptyArray; /** - * ClusterStatus stateStartTime. - * @member {google.protobuf.ITimestamp|null|undefined} stateStartTime - * @memberof google.cloud.dataproc.v1.ClusterStatus + * NodeGroup nodeGroupConfig. + * @member {google.cloud.dataproc.v1.IInstanceGroupConfig|null|undefined} nodeGroupConfig + * @memberof google.cloud.dataproc.v1.NodeGroup * @instance */ - ClusterStatus.prototype.stateStartTime = null; + NodeGroup.prototype.nodeGroupConfig = null; /** - * ClusterStatus substate. - * @member {google.cloud.dataproc.v1.ClusterStatus.Substate} substate - * @memberof google.cloud.dataproc.v1.ClusterStatus + * NodeGroup labels. + * @member {Object.} labels + * @memberof google.cloud.dataproc.v1.NodeGroup * @instance */ - ClusterStatus.prototype.substate = 0; + NodeGroup.prototype.labels = $util.emptyObject; /** - * Creates a new ClusterStatus instance using the specified properties. + * Creates a new NodeGroup instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.ClusterStatus + * @memberof google.cloud.dataproc.v1.NodeGroup * @static - * @param {google.cloud.dataproc.v1.IClusterStatus=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.ClusterStatus} ClusterStatus instance + * @param {google.cloud.dataproc.v1.INodeGroup=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.NodeGroup} NodeGroup instance */ - ClusterStatus.create = function create(properties) { - return new ClusterStatus(properties); + NodeGroup.create = function create(properties) { + return new NodeGroup(properties); }; /** - * Encodes the specified ClusterStatus message. Does not implicitly {@link google.cloud.dataproc.v1.ClusterStatus.verify|verify} messages. + * Encodes the specified NodeGroup message. 
Does not implicitly {@link google.cloud.dataproc.v1.NodeGroup.verify|verify} messages. * @function encode - * @memberof google.cloud.dataproc.v1.ClusterStatus + * @memberof google.cloud.dataproc.v1.NodeGroup * @static - * @param {google.cloud.dataproc.v1.IClusterStatus} message ClusterStatus message or plain object to encode + * @param {google.cloud.dataproc.v1.INodeGroup} message NodeGroup message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ClusterStatus.encode = function encode(message, writer) { + NodeGroup.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.state != null && Object.hasOwnProperty.call(message, "state")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.state); - if (message.detail != null && Object.hasOwnProperty.call(message, "detail")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.detail); - if (message.stateStartTime != null && Object.hasOwnProperty.call(message, "stateStartTime")) - $root.google.protobuf.Timestamp.encode(message.stateStartTime, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.substate != null && Object.hasOwnProperty.call(message, "substate")) - writer.uint32(/* id 4, wireType 0 =*/32).int32(message.substate); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.roles != null && message.roles.length) { + writer.uint32(/* id 2, wireType 2 =*/18).fork(); + for (var i = 0; i < message.roles.length; ++i) + writer.int32(message.roles[i]); + writer.ldelim(); + } + if (message.nodeGroupConfig != null && Object.hasOwnProperty.call(message, "nodeGroupConfig")) + $root.google.cloud.dataproc.v1.InstanceGroupConfig.encode(message.nodeGroupConfig, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.labels != null && Object.hasOwnProperty.call(message, 
"labels")) + for (var keys = Object.keys(message.labels), i = 0; i < keys.length; ++i) + writer.uint32(/* id 4, wireType 2 =*/34).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.labels[keys[i]]).ldelim(); return writer; }; /** - * Encodes the specified ClusterStatus message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ClusterStatus.verify|verify} messages. + * Encodes the specified NodeGroup message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.NodeGroup.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.ClusterStatus + * @memberof google.cloud.dataproc.v1.NodeGroup * @static - * @param {google.cloud.dataproc.v1.IClusterStatus} message ClusterStatus message or plain object to encode + * @param {google.cloud.dataproc.v1.INodeGroup} message NodeGroup message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ClusterStatus.encodeDelimited = function encodeDelimited(message, writer) { + NodeGroup.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ClusterStatus message from the specified reader or buffer. + * Decodes a NodeGroup message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.ClusterStatus + * @memberof google.cloud.dataproc.v1.NodeGroup * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.ClusterStatus} ClusterStatus + * @returns {google.cloud.dataproc.v1.NodeGroup} NodeGroup * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterStatus.decode = function decode(reader, length) { + NodeGroup.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ClusterStatus(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.NodeGroup(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.state = reader.int32(); + message.name = reader.string(); break; } case 2: { - message.detail = reader.string(); + if (!(message.roles && message.roles.length)) + message.roles = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.roles.push(reader.int32()); + } else + message.roles.push(reader.int32()); break; } case 3: { - message.stateStartTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + message.nodeGroupConfig = $root.google.cloud.dataproc.v1.InstanceGroupConfig.decode(reader, reader.uint32()); break; } case 4: { - message.substate = reader.int32(); + if (message.labels === $util.emptyObject) + message.labels = {}; + var end2 = reader.uint32() + reader.pos; + key = ""; + value = ""; + while (reader.pos < end2) { + var tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 
2: + value = reader.string(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.labels[key] = value; break; } default: @@ -15966,282 +16044,214 @@ }; /** - * Decodes a ClusterStatus message from the specified reader or buffer, length delimited. + * Decodes a NodeGroup message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.ClusterStatus + * @memberof google.cloud.dataproc.v1.NodeGroup * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.ClusterStatus} ClusterStatus + * @returns {google.cloud.dataproc.v1.NodeGroup} NodeGroup * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterStatus.decodeDelimited = function decodeDelimited(reader) { + NodeGroup.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ClusterStatus message. + * Verifies a NodeGroup message. 
* @function verify - * @memberof google.cloud.dataproc.v1.ClusterStatus + * @memberof google.cloud.dataproc.v1.NodeGroup * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ClusterStatus.verify = function verify(message) { + NodeGroup.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.state != null && message.hasOwnProperty("state")) - switch (message.state) { - default: - return "state: enum value expected"; - case 0: - case 1: - case 2: - case 3: - case 9: - case 4: - case 5: - case 6: - case 7: - case 8: - break; - } - if (message.detail != null && message.hasOwnProperty("detail")) - if (!$util.isString(message.detail)) - return "detail: string expected"; - if (message.stateStartTime != null && message.hasOwnProperty("stateStartTime")) { - var error = $root.google.protobuf.Timestamp.verify(message.stateStartTime); + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.roles != null && message.hasOwnProperty("roles")) { + if (!Array.isArray(message.roles)) + return "roles: array expected"; + for (var i = 0; i < message.roles.length; ++i) + switch (message.roles[i]) { + default: + return "roles: enum value[] expected"; + case 0: + case 1: + break; + } + } + if (message.nodeGroupConfig != null && message.hasOwnProperty("nodeGroupConfig")) { + var error = $root.google.cloud.dataproc.v1.InstanceGroupConfig.verify(message.nodeGroupConfig); if (error) - return "stateStartTime." + error; + return "nodeGroupConfig." 
+ error; + } + if (message.labels != null && message.hasOwnProperty("labels")) { + if (!$util.isObject(message.labels)) + return "labels: object expected"; + var key = Object.keys(message.labels); + for (var i = 0; i < key.length; ++i) + if (!$util.isString(message.labels[key[i]])) + return "labels: string{k:string} expected"; } - if (message.substate != null && message.hasOwnProperty("substate")) - switch (message.substate) { - default: - return "substate: enum value expected"; - case 0: - case 1: - case 2: - break; - } return null; }; /** - * Creates a ClusterStatus message from a plain object. Also converts values to their respective internal types. + * Creates a NodeGroup message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.ClusterStatus + * @memberof google.cloud.dataproc.v1.NodeGroup * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.ClusterStatus} ClusterStatus + * @returns {google.cloud.dataproc.v1.NodeGroup} NodeGroup */ - ClusterStatus.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.ClusterStatus) + NodeGroup.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.NodeGroup) return object; - var message = new $root.google.cloud.dataproc.v1.ClusterStatus(); - switch (object.state) { - default: - if (typeof object.state === "number") { - message.state = object.state; - break; - } - break; - case "UNKNOWN": - case 0: - message.state = 0; - break; - case "CREATING": - case 1: - message.state = 1; - break; - case "RUNNING": - case 2: - message.state = 2; - break; - case "ERROR": - case 3: - message.state = 3; - break; - case "ERROR_DUE_TO_UPDATE": - case 9: - message.state = 9; - break; - case "DELETING": - case 4: - message.state = 4; - break; - case "UPDATING": - case 5: - message.state = 5; - break; - case "STOPPING": - case 6: - 
message.state = 6; - break; - case "STOPPED": - case 7: - message.state = 7; - break; - case "STARTING": - case 8: - message.state = 8; - break; + var message = new $root.google.cloud.dataproc.v1.NodeGroup(); + if (object.name != null) + message.name = String(object.name); + if (object.roles) { + if (!Array.isArray(object.roles)) + throw TypeError(".google.cloud.dataproc.v1.NodeGroup.roles: array expected"); + message.roles = []; + for (var i = 0; i < object.roles.length; ++i) + switch (object.roles[i]) { + default: + if (typeof object.roles[i] === "number") { + message.roles[i] = object.roles[i]; + break; + } + case "ROLE_UNSPECIFIED": + case 0: + message.roles[i] = 0; + break; + case "DRIVER": + case 1: + message.roles[i] = 1; + break; + } } - if (object.detail != null) - message.detail = String(object.detail); - if (object.stateStartTime != null) { - if (typeof object.stateStartTime !== "object") - throw TypeError(".google.cloud.dataproc.v1.ClusterStatus.stateStartTime: object expected"); - message.stateStartTime = $root.google.protobuf.Timestamp.fromObject(object.stateStartTime); + if (object.nodeGroupConfig != null) { + if (typeof object.nodeGroupConfig !== "object") + throw TypeError(".google.cloud.dataproc.v1.NodeGroup.nodeGroupConfig: object expected"); + message.nodeGroupConfig = $root.google.cloud.dataproc.v1.InstanceGroupConfig.fromObject(object.nodeGroupConfig); } - switch (object.substate) { - default: - if (typeof object.substate === "number") { - message.substate = object.substate; - break; - } - break; - case "UNSPECIFIED": - case 0: - message.substate = 0; - break; - case "UNHEALTHY": - case 1: - message.substate = 1; - break; - case "STALE_STATUS": - case 2: - message.substate = 2; - break; + if (object.labels) { + if (typeof object.labels !== "object") + throw TypeError(".google.cloud.dataproc.v1.NodeGroup.labels: object expected"); + message.labels = {}; + for (var keys = Object.keys(object.labels), i = 0; i < keys.length; ++i) + 
message.labels[keys[i]] = String(object.labels[keys[i]]); } return message; }; /** - * Creates a plain object from a ClusterStatus message. Also converts values to other types if specified. + * Creates a plain object from a NodeGroup message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.ClusterStatus + * @memberof google.cloud.dataproc.v1.NodeGroup * @static - * @param {google.cloud.dataproc.v1.ClusterStatus} message ClusterStatus + * @param {google.cloud.dataproc.v1.NodeGroup} message NodeGroup * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ClusterStatus.toObject = function toObject(message, options) { + NodeGroup.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; + if (options.arrays || options.defaults) + object.roles = []; + if (options.objects || options.defaults) + object.labels = {}; if (options.defaults) { - object.state = options.enums === String ? "UNKNOWN" : 0; - object.detail = ""; - object.stateStartTime = null; - object.substate = options.enums === String ? "UNSPECIFIED" : 0; + object.name = ""; + object.nodeGroupConfig = null; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.roles && message.roles.length) { + object.roles = []; + for (var j = 0; j < message.roles.length; ++j) + object.roles[j] = options.enums === String ? $root.google.cloud.dataproc.v1.NodeGroup.Role[message.roles[j]] === undefined ? 
message.roles[j] : $root.google.cloud.dataproc.v1.NodeGroup.Role[message.roles[j]] : message.roles[j]; + } + if (message.nodeGroupConfig != null && message.hasOwnProperty("nodeGroupConfig")) + object.nodeGroupConfig = $root.google.cloud.dataproc.v1.InstanceGroupConfig.toObject(message.nodeGroupConfig, options); + var keys2; + if (message.labels && (keys2 = Object.keys(message.labels)).length) { + object.labels = {}; + for (var j = 0; j < keys2.length; ++j) + object.labels[keys2[j]] = message.labels[keys2[j]]; } - if (message.state != null && message.hasOwnProperty("state")) - object.state = options.enums === String ? $root.google.cloud.dataproc.v1.ClusterStatus.State[message.state] === undefined ? message.state : $root.google.cloud.dataproc.v1.ClusterStatus.State[message.state] : message.state; - if (message.detail != null && message.hasOwnProperty("detail")) - object.detail = message.detail; - if (message.stateStartTime != null && message.hasOwnProperty("stateStartTime")) - object.stateStartTime = $root.google.protobuf.Timestamp.toObject(message.stateStartTime, options); - if (message.substate != null && message.hasOwnProperty("substate")) - object.substate = options.enums === String ? $root.google.cloud.dataproc.v1.ClusterStatus.Substate[message.substate] === undefined ? message.substate : $root.google.cloud.dataproc.v1.ClusterStatus.Substate[message.substate] : message.substate; return object; }; /** - * Converts this ClusterStatus to JSON. + * Converts this NodeGroup to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.ClusterStatus + * @memberof google.cloud.dataproc.v1.NodeGroup * @instance * @returns {Object.} JSON object */ - ClusterStatus.prototype.toJSON = function toJSON() { + NodeGroup.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ClusterStatus + * Gets the default type url for NodeGroup * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.ClusterStatus + * @memberof google.cloud.dataproc.v1.NodeGroup * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ClusterStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + NodeGroup.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.ClusterStatus"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.NodeGroup"; }; /** - * State enum. 
- * @name google.cloud.dataproc.v1.ClusterStatus.State - * @enum {number} - * @property {number} UNKNOWN=0 UNKNOWN value - * @property {number} CREATING=1 CREATING value - * @property {number} RUNNING=2 RUNNING value - * @property {number} ERROR=3 ERROR value - * @property {number} ERROR_DUE_TO_UPDATE=9 ERROR_DUE_TO_UPDATE value - * @property {number} DELETING=4 DELETING value - * @property {number} UPDATING=5 UPDATING value - * @property {number} STOPPING=6 STOPPING value - * @property {number} STOPPED=7 STOPPED value - * @property {number} STARTING=8 STARTING value - */ - ClusterStatus.State = (function() { - var valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "UNKNOWN"] = 0; - values[valuesById[1] = "CREATING"] = 1; - values[valuesById[2] = "RUNNING"] = 2; - values[valuesById[3] = "ERROR"] = 3; - values[valuesById[9] = "ERROR_DUE_TO_UPDATE"] = 9; - values[valuesById[4] = "DELETING"] = 4; - values[valuesById[5] = "UPDATING"] = 5; - values[valuesById[6] = "STOPPING"] = 6; - values[valuesById[7] = "STOPPED"] = 7; - values[valuesById[8] = "STARTING"] = 8; - return values; - })(); - - /** - * Substate enum. - * @name google.cloud.dataproc.v1.ClusterStatus.Substate + * Role enum. 
+ * @name google.cloud.dataproc.v1.NodeGroup.Role * @enum {number} - * @property {number} UNSPECIFIED=0 UNSPECIFIED value - * @property {number} UNHEALTHY=1 UNHEALTHY value - * @property {number} STALE_STATUS=2 STALE_STATUS value + * @property {number} ROLE_UNSPECIFIED=0 ROLE_UNSPECIFIED value + * @property {number} DRIVER=1 DRIVER value */ - ClusterStatus.Substate = (function() { + NodeGroup.Role = (function() { var valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "UNSPECIFIED"] = 0; - values[valuesById[1] = "UNHEALTHY"] = 1; - values[valuesById[2] = "STALE_STATUS"] = 2; + values[valuesById[0] = "ROLE_UNSPECIFIED"] = 0; + values[valuesById[1] = "DRIVER"] = 1; return values; })(); - return ClusterStatus; + return NodeGroup; })(); - v1.SecurityConfig = (function() { + v1.NodeInitializationAction = (function() { /** - * Properties of a SecurityConfig. + * Properties of a NodeInitializationAction. * @memberof google.cloud.dataproc.v1 - * @interface ISecurityConfig - * @property {google.cloud.dataproc.v1.IKerberosConfig|null} [kerberosConfig] SecurityConfig kerberosConfig - * @property {google.cloud.dataproc.v1.IIdentityConfig|null} [identityConfig] SecurityConfig identityConfig + * @interface INodeInitializationAction + * @property {string|null} [executableFile] NodeInitializationAction executableFile + * @property {google.protobuf.IDuration|null} [executionTimeout] NodeInitializationAction executionTimeout */ /** - * Constructs a new SecurityConfig. + * Constructs a new NodeInitializationAction. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a SecurityConfig. - * @implements ISecurityConfig + * @classdesc Represents a NodeInitializationAction. 
+ * @implements INodeInitializationAction * @constructor - * @param {google.cloud.dataproc.v1.ISecurityConfig=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.INodeInitializationAction=} [properties] Properties to set */ - function SecurityConfig(properties) { + function NodeInitializationAction(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -16249,89 +16259,89 @@ } /** - * SecurityConfig kerberosConfig. - * @member {google.cloud.dataproc.v1.IKerberosConfig|null|undefined} kerberosConfig - * @memberof google.cloud.dataproc.v1.SecurityConfig + * NodeInitializationAction executableFile. + * @member {string} executableFile + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @instance */ - SecurityConfig.prototype.kerberosConfig = null; + NodeInitializationAction.prototype.executableFile = ""; /** - * SecurityConfig identityConfig. - * @member {google.cloud.dataproc.v1.IIdentityConfig|null|undefined} identityConfig - * @memberof google.cloud.dataproc.v1.SecurityConfig + * NodeInitializationAction executionTimeout. + * @member {google.protobuf.IDuration|null|undefined} executionTimeout + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @instance */ - SecurityConfig.prototype.identityConfig = null; + NodeInitializationAction.prototype.executionTimeout = null; /** - * Creates a new SecurityConfig instance using the specified properties. + * Creates a new NodeInitializationAction instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.SecurityConfig + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @static - * @param {google.cloud.dataproc.v1.ISecurityConfig=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.SecurityConfig} SecurityConfig instance + * @param {google.cloud.dataproc.v1.INodeInitializationAction=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.NodeInitializationAction} NodeInitializationAction instance */ - SecurityConfig.create = function create(properties) { - return new SecurityConfig(properties); + NodeInitializationAction.create = function create(properties) { + return new NodeInitializationAction(properties); }; /** - * Encodes the specified SecurityConfig message. Does not implicitly {@link google.cloud.dataproc.v1.SecurityConfig.verify|verify} messages. + * Encodes the specified NodeInitializationAction message. Does not implicitly {@link google.cloud.dataproc.v1.NodeInitializationAction.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.SecurityConfig + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @static - * @param {google.cloud.dataproc.v1.ISecurityConfig} message SecurityConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.INodeInitializationAction} message NodeInitializationAction message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SecurityConfig.encode = function encode(message, writer) { + NodeInitializationAction.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.kerberosConfig != null && Object.hasOwnProperty.call(message, "kerberosConfig")) - $root.google.cloud.dataproc.v1.KerberosConfig.encode(message.kerberosConfig, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.identityConfig != null && Object.hasOwnProperty.call(message, "identityConfig")) - $root.google.cloud.dataproc.v1.IdentityConfig.encode(message.identityConfig, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.executableFile != null && Object.hasOwnProperty.call(message, "executableFile")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.executableFile); + if (message.executionTimeout != null && Object.hasOwnProperty.call(message, "executionTimeout")) + $root.google.protobuf.Duration.encode(message.executionTimeout, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified SecurityConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.SecurityConfig.verify|verify} messages. + * Encodes the specified NodeInitializationAction message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.NodeInitializationAction.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.SecurityConfig + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @static - * @param {google.cloud.dataproc.v1.ISecurityConfig} message SecurityConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.INodeInitializationAction} message NodeInitializationAction message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SecurityConfig.encodeDelimited = function encodeDelimited(message, writer) { + NodeInitializationAction.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SecurityConfig message from the specified reader or buffer. + * Decodes a NodeInitializationAction message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.SecurityConfig + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.SecurityConfig} SecurityConfig + * @returns {google.cloud.dataproc.v1.NodeInitializationAction} NodeInitializationAction * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SecurityConfig.decode = function decode(reader, length) { + NodeInitializationAction.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.SecurityConfig(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.NodeInitializationAction(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.kerberosConfig = $root.google.cloud.dataproc.v1.KerberosConfig.decode(reader, reader.uint32()); + message.executableFile = reader.string(); break; } case 2: { - message.identityConfig = $root.google.cloud.dataproc.v1.IdentityConfig.decode(reader, reader.uint32()); + message.executionTimeout = $root.google.protobuf.Duration.decode(reader, reader.uint32()); break; } default: @@ -16343,155 +16353,139 @@ }; /** - * Decodes a SecurityConfig message from the specified reader or buffer, length delimited. + * Decodes a NodeInitializationAction message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.SecurityConfig + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.SecurityConfig} SecurityConfig + * @returns {google.cloud.dataproc.v1.NodeInitializationAction} NodeInitializationAction * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SecurityConfig.decodeDelimited = function decodeDelimited(reader) { + NodeInitializationAction.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SecurityConfig message. + * Verifies a NodeInitializationAction message. 
* @function verify - * @memberof google.cloud.dataproc.v1.SecurityConfig + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SecurityConfig.verify = function verify(message) { + NodeInitializationAction.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.kerberosConfig != null && message.hasOwnProperty("kerberosConfig")) { - var error = $root.google.cloud.dataproc.v1.KerberosConfig.verify(message.kerberosConfig); - if (error) - return "kerberosConfig." + error; - } - if (message.identityConfig != null && message.hasOwnProperty("identityConfig")) { - var error = $root.google.cloud.dataproc.v1.IdentityConfig.verify(message.identityConfig); + if (message.executableFile != null && message.hasOwnProperty("executableFile")) + if (!$util.isString(message.executableFile)) + return "executableFile: string expected"; + if (message.executionTimeout != null && message.hasOwnProperty("executionTimeout")) { + var error = $root.google.protobuf.Duration.verify(message.executionTimeout); if (error) - return "identityConfig." + error; + return "executionTimeout." + error; } return null; }; /** - * Creates a SecurityConfig message from a plain object. Also converts values to their respective internal types. + * Creates a NodeInitializationAction message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.SecurityConfig + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.SecurityConfig} SecurityConfig + * @returns {google.cloud.dataproc.v1.NodeInitializationAction} NodeInitializationAction */ - SecurityConfig.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.SecurityConfig) + NodeInitializationAction.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.NodeInitializationAction) return object; - var message = new $root.google.cloud.dataproc.v1.SecurityConfig(); - if (object.kerberosConfig != null) { - if (typeof object.kerberosConfig !== "object") - throw TypeError(".google.cloud.dataproc.v1.SecurityConfig.kerberosConfig: object expected"); - message.kerberosConfig = $root.google.cloud.dataproc.v1.KerberosConfig.fromObject(object.kerberosConfig); - } - if (object.identityConfig != null) { - if (typeof object.identityConfig !== "object") - throw TypeError(".google.cloud.dataproc.v1.SecurityConfig.identityConfig: object expected"); - message.identityConfig = $root.google.cloud.dataproc.v1.IdentityConfig.fromObject(object.identityConfig); + var message = new $root.google.cloud.dataproc.v1.NodeInitializationAction(); + if (object.executableFile != null) + message.executableFile = String(object.executableFile); + if (object.executionTimeout != null) { + if (typeof object.executionTimeout !== "object") + throw TypeError(".google.cloud.dataproc.v1.NodeInitializationAction.executionTimeout: object expected"); + message.executionTimeout = $root.google.protobuf.Duration.fromObject(object.executionTimeout); } return message; }; /** - * Creates a plain object from a SecurityConfig message. Also converts values to other types if specified. + * Creates a plain object from a NodeInitializationAction message. 
Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.SecurityConfig + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @static - * @param {google.cloud.dataproc.v1.SecurityConfig} message SecurityConfig + * @param {google.cloud.dataproc.v1.NodeInitializationAction} message NodeInitializationAction * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SecurityConfig.toObject = function toObject(message, options) { + NodeInitializationAction.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { - object.kerberosConfig = null; - object.identityConfig = null; + object.executableFile = ""; + object.executionTimeout = null; } - if (message.kerberosConfig != null && message.hasOwnProperty("kerberosConfig")) - object.kerberosConfig = $root.google.cloud.dataproc.v1.KerberosConfig.toObject(message.kerberosConfig, options); - if (message.identityConfig != null && message.hasOwnProperty("identityConfig")) - object.identityConfig = $root.google.cloud.dataproc.v1.IdentityConfig.toObject(message.identityConfig, options); + if (message.executableFile != null && message.hasOwnProperty("executableFile")) + object.executableFile = message.executableFile; + if (message.executionTimeout != null && message.hasOwnProperty("executionTimeout")) + object.executionTimeout = $root.google.protobuf.Duration.toObject(message.executionTimeout, options); return object; }; /** - * Converts this SecurityConfig to JSON. + * Converts this NodeInitializationAction to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.SecurityConfig + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @instance * @returns {Object.} JSON object */ - SecurityConfig.prototype.toJSON = function toJSON() { + NodeInitializationAction.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SecurityConfig + * Gets the default type url for NodeInitializationAction * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.SecurityConfig + * @memberof google.cloud.dataproc.v1.NodeInitializationAction * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SecurityConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + NodeInitializationAction.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.SecurityConfig"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.NodeInitializationAction"; }; - return SecurityConfig; + return NodeInitializationAction; })(); - v1.KerberosConfig = (function() { + v1.ClusterStatus = (function() { /** - * Properties of a KerberosConfig. + * Properties of a ClusterStatus. 
* @memberof google.cloud.dataproc.v1 - * @interface IKerberosConfig - * @property {boolean|null} [enableKerberos] KerberosConfig enableKerberos - * @property {string|null} [rootPrincipalPasswordUri] KerberosConfig rootPrincipalPasswordUri - * @property {string|null} [kmsKeyUri] KerberosConfig kmsKeyUri - * @property {string|null} [keystoreUri] KerberosConfig keystoreUri - * @property {string|null} [truststoreUri] KerberosConfig truststoreUri - * @property {string|null} [keystorePasswordUri] KerberosConfig keystorePasswordUri - * @property {string|null} [keyPasswordUri] KerberosConfig keyPasswordUri - * @property {string|null} [truststorePasswordUri] KerberosConfig truststorePasswordUri - * @property {string|null} [crossRealmTrustRealm] KerberosConfig crossRealmTrustRealm - * @property {string|null} [crossRealmTrustKdc] KerberosConfig crossRealmTrustKdc - * @property {string|null} [crossRealmTrustAdminServer] KerberosConfig crossRealmTrustAdminServer - * @property {string|null} [crossRealmTrustSharedPasswordUri] KerberosConfig crossRealmTrustSharedPasswordUri - * @property {string|null} [kdcDbKeyUri] KerberosConfig kdcDbKeyUri - * @property {number|null} [tgtLifetimeHours] KerberosConfig tgtLifetimeHours - * @property {string|null} [realm] KerberosConfig realm + * @interface IClusterStatus + * @property {google.cloud.dataproc.v1.ClusterStatus.State|null} [state] ClusterStatus state + * @property {string|null} [detail] ClusterStatus detail + * @property {google.protobuf.ITimestamp|null} [stateStartTime] ClusterStatus stateStartTime + * @property {google.cloud.dataproc.v1.ClusterStatus.Substate|null} [substate] ClusterStatus substate */ /** - * Constructs a new KerberosConfig. + * Constructs a new ClusterStatus. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a KerberosConfig. - * @implements IKerberosConfig + * @classdesc Represents a ClusterStatus. 
+ * @implements IClusterStatus * @constructor - * @param {google.cloud.dataproc.v1.IKerberosConfig=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IClusterStatus=} [properties] Properties to set */ - function KerberosConfig(properties) { + function ClusterStatus(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -16499,271 +16493,117 @@ } /** - * KerberosConfig enableKerberos. - * @member {boolean} enableKerberos - * @memberof google.cloud.dataproc.v1.KerberosConfig + * ClusterStatus state. + * @member {google.cloud.dataproc.v1.ClusterStatus.State} state + * @memberof google.cloud.dataproc.v1.ClusterStatus * @instance */ - KerberosConfig.prototype.enableKerberos = false; + ClusterStatus.prototype.state = 0; /** - * KerberosConfig rootPrincipalPasswordUri. - * @member {string} rootPrincipalPasswordUri - * @memberof google.cloud.dataproc.v1.KerberosConfig + * ClusterStatus detail. + * @member {string} detail + * @memberof google.cloud.dataproc.v1.ClusterStatus * @instance */ - KerberosConfig.prototype.rootPrincipalPasswordUri = ""; + ClusterStatus.prototype.detail = ""; /** - * KerberosConfig kmsKeyUri. - * @member {string} kmsKeyUri - * @memberof google.cloud.dataproc.v1.KerberosConfig + * ClusterStatus stateStartTime. + * @member {google.protobuf.ITimestamp|null|undefined} stateStartTime + * @memberof google.cloud.dataproc.v1.ClusterStatus * @instance */ - KerberosConfig.prototype.kmsKeyUri = ""; + ClusterStatus.prototype.stateStartTime = null; /** - * KerberosConfig keystoreUri. - * @member {string} keystoreUri - * @memberof google.cloud.dataproc.v1.KerberosConfig + * ClusterStatus substate. + * @member {google.cloud.dataproc.v1.ClusterStatus.Substate} substate + * @memberof google.cloud.dataproc.v1.ClusterStatus * @instance */ - KerberosConfig.prototype.keystoreUri = ""; + ClusterStatus.prototype.substate = 0; /** - * KerberosConfig truststoreUri. 
- * @member {string} truststoreUri - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance + * Creates a new ClusterStatus instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1.ClusterStatus + * @static + * @param {google.cloud.dataproc.v1.IClusterStatus=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.ClusterStatus} ClusterStatus instance */ - KerberosConfig.prototype.truststoreUri = ""; + ClusterStatus.create = function create(properties) { + return new ClusterStatus(properties); + }; /** - * KerberosConfig keystorePasswordUri. - * @member {string} keystorePasswordUri - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance + * Encodes the specified ClusterStatus message. Does not implicitly {@link google.cloud.dataproc.v1.ClusterStatus.verify|verify} messages. + * @function encode + * @memberof google.cloud.dataproc.v1.ClusterStatus + * @static + * @param {google.cloud.dataproc.v1.IClusterStatus} message ClusterStatus message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - KerberosConfig.prototype.keystorePasswordUri = ""; + ClusterStatus.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.state != null && Object.hasOwnProperty.call(message, "state")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.state); + if (message.detail != null && Object.hasOwnProperty.call(message, "detail")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.detail); + if (message.stateStartTime != null && Object.hasOwnProperty.call(message, "stateStartTime")) + $root.google.protobuf.Timestamp.encode(message.stateStartTime, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.substate != null && Object.hasOwnProperty.call(message, "substate")) + writer.uint32(/* id 4, wireType 0 =*/32).int32(message.substate); + return writer; + }; 
/** - * KerberosConfig keyPasswordUri. - * @member {string} keyPasswordUri - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance + * Encodes the specified ClusterStatus message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ClusterStatus.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.ClusterStatus + * @static + * @param {google.cloud.dataproc.v1.IClusterStatus} message ClusterStatus message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - KerberosConfig.prototype.keyPasswordUri = ""; + ClusterStatus.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; /** - * KerberosConfig truststorePasswordUri. - * @member {string} truststorePasswordUri - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance + * Decodes a ClusterStatus message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1.ClusterStatus + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.ClusterStatus} ClusterStatus + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - KerberosConfig.prototype.truststorePasswordUri = ""; - - /** - * KerberosConfig crossRealmTrustRealm. - * @member {string} crossRealmTrustRealm - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance - */ - KerberosConfig.prototype.crossRealmTrustRealm = ""; - - /** - * KerberosConfig crossRealmTrustKdc. - * @member {string} crossRealmTrustKdc - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance - */ - KerberosConfig.prototype.crossRealmTrustKdc = ""; - - /** - * KerberosConfig crossRealmTrustAdminServer. 
- * @member {string} crossRealmTrustAdminServer - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance - */ - KerberosConfig.prototype.crossRealmTrustAdminServer = ""; - - /** - * KerberosConfig crossRealmTrustSharedPasswordUri. - * @member {string} crossRealmTrustSharedPasswordUri - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance - */ - KerberosConfig.prototype.crossRealmTrustSharedPasswordUri = ""; - - /** - * KerberosConfig kdcDbKeyUri. - * @member {string} kdcDbKeyUri - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance - */ - KerberosConfig.prototype.kdcDbKeyUri = ""; - - /** - * KerberosConfig tgtLifetimeHours. - * @member {number} tgtLifetimeHours - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance - */ - KerberosConfig.prototype.tgtLifetimeHours = 0; - - /** - * KerberosConfig realm. - * @member {string} realm - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @instance - */ - KerberosConfig.prototype.realm = ""; - - /** - * Creates a new KerberosConfig instance using the specified properties. - * @function create - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @static - * @param {google.cloud.dataproc.v1.IKerberosConfig=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.KerberosConfig} KerberosConfig instance - */ - KerberosConfig.create = function create(properties) { - return new KerberosConfig(properties); - }; - - /** - * Encodes the specified KerberosConfig message. Does not implicitly {@link google.cloud.dataproc.v1.KerberosConfig.verify|verify} messages. 
- * @function encode - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @static - * @param {google.cloud.dataproc.v1.IKerberosConfig} message KerberosConfig message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - KerberosConfig.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.enableKerberos != null && Object.hasOwnProperty.call(message, "enableKerberos")) - writer.uint32(/* id 1, wireType 0 =*/8).bool(message.enableKerberos); - if (message.rootPrincipalPasswordUri != null && Object.hasOwnProperty.call(message, "rootPrincipalPasswordUri")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.rootPrincipalPasswordUri); - if (message.kmsKeyUri != null && Object.hasOwnProperty.call(message, "kmsKeyUri")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.kmsKeyUri); - if (message.keystoreUri != null && Object.hasOwnProperty.call(message, "keystoreUri")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.keystoreUri); - if (message.truststoreUri != null && Object.hasOwnProperty.call(message, "truststoreUri")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.truststoreUri); - if (message.keystorePasswordUri != null && Object.hasOwnProperty.call(message, "keystorePasswordUri")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.keystorePasswordUri); - if (message.keyPasswordUri != null && Object.hasOwnProperty.call(message, "keyPasswordUri")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.keyPasswordUri); - if (message.truststorePasswordUri != null && Object.hasOwnProperty.call(message, "truststorePasswordUri")) - writer.uint32(/* id 8, wireType 2 =*/66).string(message.truststorePasswordUri); - if (message.crossRealmTrustRealm != null && Object.hasOwnProperty.call(message, "crossRealmTrustRealm")) - writer.uint32(/* id 9, wireType 2 =*/74).string(message.crossRealmTrustRealm); - if 
(message.crossRealmTrustKdc != null && Object.hasOwnProperty.call(message, "crossRealmTrustKdc")) - writer.uint32(/* id 10, wireType 2 =*/82).string(message.crossRealmTrustKdc); - if (message.crossRealmTrustAdminServer != null && Object.hasOwnProperty.call(message, "crossRealmTrustAdminServer")) - writer.uint32(/* id 11, wireType 2 =*/90).string(message.crossRealmTrustAdminServer); - if (message.crossRealmTrustSharedPasswordUri != null && Object.hasOwnProperty.call(message, "crossRealmTrustSharedPasswordUri")) - writer.uint32(/* id 12, wireType 2 =*/98).string(message.crossRealmTrustSharedPasswordUri); - if (message.kdcDbKeyUri != null && Object.hasOwnProperty.call(message, "kdcDbKeyUri")) - writer.uint32(/* id 13, wireType 2 =*/106).string(message.kdcDbKeyUri); - if (message.tgtLifetimeHours != null && Object.hasOwnProperty.call(message, "tgtLifetimeHours")) - writer.uint32(/* id 14, wireType 0 =*/112).int32(message.tgtLifetimeHours); - if (message.realm != null && Object.hasOwnProperty.call(message, "realm")) - writer.uint32(/* id 15, wireType 2 =*/122).string(message.realm); - return writer; - }; - - /** - * Encodes the specified KerberosConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.KerberosConfig.verify|verify} messages. - * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @static - * @param {google.cloud.dataproc.v1.IKerberosConfig} message KerberosConfig message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - KerberosConfig.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a KerberosConfig message from the specified reader or buffer. 
- * @function decode - * @memberof google.cloud.dataproc.v1.KerberosConfig - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.KerberosConfig} KerberosConfig - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - KerberosConfig.decode = function decode(reader, length) { + ClusterStatus.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.KerberosConfig(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ClusterStatus(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.enableKerberos = reader.bool(); + message.state = reader.int32(); break; } case 2: { - message.rootPrincipalPasswordUri = reader.string(); + message.detail = reader.string(); break; } case 3: { - message.kmsKeyUri = reader.string(); + message.stateStartTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); break; } case 4: { - message.keystoreUri = reader.string(); - break; - } - case 5: { - message.truststoreUri = reader.string(); - break; - } - case 6: { - message.keystorePasswordUri = reader.string(); - break; - } - case 7: { - message.keyPasswordUri = reader.string(); - break; - } - case 8: { - message.truststorePasswordUri = reader.string(); - break; - } - case 9: { - message.crossRealmTrustRealm = reader.string(); - break; - } - case 10: { - message.crossRealmTrustKdc = reader.string(); - break; - } - case 11: { - message.crossRealmTrustAdminServer = reader.string(); - break; - } - case 12: { - message.crossRealmTrustSharedPasswordUri = reader.string(); - break; - } - 
case 13: { - message.kdcDbKeyUri = reader.string(); - break; - } - case 14: { - message.tgtLifetimeHours = reader.int32(); - break; - } - case 15: { - message.realm = reader.string(); + message.substate = reader.int32(); break; } default: @@ -16775,236 +16615,282 @@ }; /** - * Decodes a KerberosConfig message from the specified reader or buffer, length delimited. + * Decodes a ClusterStatus message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.KerberosConfig + * @memberof google.cloud.dataproc.v1.ClusterStatus * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.KerberosConfig} KerberosConfig + * @returns {google.cloud.dataproc.v1.ClusterStatus} ClusterStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - KerberosConfig.decodeDelimited = function decodeDelimited(reader) { + ClusterStatus.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a KerberosConfig message. + * Verifies a ClusterStatus message. 
* @function verify - * @memberof google.cloud.dataproc.v1.KerberosConfig + * @memberof google.cloud.dataproc.v1.ClusterStatus * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - KerberosConfig.verify = function verify(message) { + ClusterStatus.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.enableKerberos != null && message.hasOwnProperty("enableKerberos")) - if (typeof message.enableKerberos !== "boolean") - return "enableKerberos: boolean expected"; - if (message.rootPrincipalPasswordUri != null && message.hasOwnProperty("rootPrincipalPasswordUri")) - if (!$util.isString(message.rootPrincipalPasswordUri)) - return "rootPrincipalPasswordUri: string expected"; - if (message.kmsKeyUri != null && message.hasOwnProperty("kmsKeyUri")) - if (!$util.isString(message.kmsKeyUri)) - return "kmsKeyUri: string expected"; - if (message.keystoreUri != null && message.hasOwnProperty("keystoreUri")) - if (!$util.isString(message.keystoreUri)) - return "keystoreUri: string expected"; - if (message.truststoreUri != null && message.hasOwnProperty("truststoreUri")) - if (!$util.isString(message.truststoreUri)) - return "truststoreUri: string expected"; - if (message.keystorePasswordUri != null && message.hasOwnProperty("keystorePasswordUri")) - if (!$util.isString(message.keystorePasswordUri)) - return "keystorePasswordUri: string expected"; - if (message.keyPasswordUri != null && message.hasOwnProperty("keyPasswordUri")) - if (!$util.isString(message.keyPasswordUri)) - return "keyPasswordUri: string expected"; - if (message.truststorePasswordUri != null && message.hasOwnProperty("truststorePasswordUri")) - if (!$util.isString(message.truststorePasswordUri)) - return "truststorePasswordUri: string expected"; - if (message.crossRealmTrustRealm != null && message.hasOwnProperty("crossRealmTrustRealm")) - if 
(!$util.isString(message.crossRealmTrustRealm)) - return "crossRealmTrustRealm: string expected"; - if (message.crossRealmTrustKdc != null && message.hasOwnProperty("crossRealmTrustKdc")) - if (!$util.isString(message.crossRealmTrustKdc)) - return "crossRealmTrustKdc: string expected"; - if (message.crossRealmTrustAdminServer != null && message.hasOwnProperty("crossRealmTrustAdminServer")) - if (!$util.isString(message.crossRealmTrustAdminServer)) - return "crossRealmTrustAdminServer: string expected"; - if (message.crossRealmTrustSharedPasswordUri != null && message.hasOwnProperty("crossRealmTrustSharedPasswordUri")) - if (!$util.isString(message.crossRealmTrustSharedPasswordUri)) - return "crossRealmTrustSharedPasswordUri: string expected"; - if (message.kdcDbKeyUri != null && message.hasOwnProperty("kdcDbKeyUri")) - if (!$util.isString(message.kdcDbKeyUri)) - return "kdcDbKeyUri: string expected"; - if (message.tgtLifetimeHours != null && message.hasOwnProperty("tgtLifetimeHours")) - if (!$util.isInteger(message.tgtLifetimeHours)) - return "tgtLifetimeHours: integer expected"; - if (message.realm != null && message.hasOwnProperty("realm")) - if (!$util.isString(message.realm)) - return "realm: string expected"; - return null; - }; - - /** - * Creates a KerberosConfig message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof google.cloud.dataproc.v1.KerberosConfig + if (message.state != null && message.hasOwnProperty("state")) + switch (message.state) { + default: + return "state: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 9: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + if (message.detail != null && message.hasOwnProperty("detail")) + if (!$util.isString(message.detail)) + return "detail: string expected"; + if (message.stateStartTime != null && message.hasOwnProperty("stateStartTime")) { + var error = $root.google.protobuf.Timestamp.verify(message.stateStartTime); + if (error) + return "stateStartTime." + error; + } + if (message.substate != null && message.hasOwnProperty("substate")) + switch (message.substate) { + default: + return "substate: enum value expected"; + case 0: + case 1: + case 2: + break; + } + return null; + }; + + /** + * Creates a ClusterStatus message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.dataproc.v1.ClusterStatus * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.KerberosConfig} KerberosConfig + * @returns {google.cloud.dataproc.v1.ClusterStatus} ClusterStatus */ - KerberosConfig.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.KerberosConfig) + ClusterStatus.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.ClusterStatus) return object; - var message = new $root.google.cloud.dataproc.v1.KerberosConfig(); - if (object.enableKerberos != null) - message.enableKerberos = Boolean(object.enableKerberos); - if (object.rootPrincipalPasswordUri != null) - message.rootPrincipalPasswordUri = String(object.rootPrincipalPasswordUri); - if (object.kmsKeyUri != null) - message.kmsKeyUri = String(object.kmsKeyUri); - if (object.keystoreUri != null) - message.keystoreUri = String(object.keystoreUri); - if (object.truststoreUri != null) - message.truststoreUri = String(object.truststoreUri); - if (object.keystorePasswordUri != null) - message.keystorePasswordUri = String(object.keystorePasswordUri); - if (object.keyPasswordUri != null) - message.keyPasswordUri = String(object.keyPasswordUri); - if (object.truststorePasswordUri != null) - message.truststorePasswordUri = String(object.truststorePasswordUri); - if (object.crossRealmTrustRealm != null) - message.crossRealmTrustRealm = String(object.crossRealmTrustRealm); - if (object.crossRealmTrustKdc != null) - message.crossRealmTrustKdc = String(object.crossRealmTrustKdc); - if (object.crossRealmTrustAdminServer != null) - message.crossRealmTrustAdminServer = String(object.crossRealmTrustAdminServer); - if (object.crossRealmTrustSharedPasswordUri != null) - message.crossRealmTrustSharedPasswordUri = String(object.crossRealmTrustSharedPasswordUri); - if (object.kdcDbKeyUri != null) - message.kdcDbKeyUri = String(object.kdcDbKeyUri); - 
if (object.tgtLifetimeHours != null) - message.tgtLifetimeHours = object.tgtLifetimeHours | 0; - if (object.realm != null) - message.realm = String(object.realm); + var message = new $root.google.cloud.dataproc.v1.ClusterStatus(); + switch (object.state) { + default: + if (typeof object.state === "number") { + message.state = object.state; + break; + } + break; + case "UNKNOWN": + case 0: + message.state = 0; + break; + case "CREATING": + case 1: + message.state = 1; + break; + case "RUNNING": + case 2: + message.state = 2; + break; + case "ERROR": + case 3: + message.state = 3; + break; + case "ERROR_DUE_TO_UPDATE": + case 9: + message.state = 9; + break; + case "DELETING": + case 4: + message.state = 4; + break; + case "UPDATING": + case 5: + message.state = 5; + break; + case "STOPPING": + case 6: + message.state = 6; + break; + case "STOPPED": + case 7: + message.state = 7; + break; + case "STARTING": + case 8: + message.state = 8; + break; + } + if (object.detail != null) + message.detail = String(object.detail); + if (object.stateStartTime != null) { + if (typeof object.stateStartTime !== "object") + throw TypeError(".google.cloud.dataproc.v1.ClusterStatus.stateStartTime: object expected"); + message.stateStartTime = $root.google.protobuf.Timestamp.fromObject(object.stateStartTime); + } + switch (object.substate) { + default: + if (typeof object.substate === "number") { + message.substate = object.substate; + break; + } + break; + case "UNSPECIFIED": + case 0: + message.substate = 0; + break; + case "UNHEALTHY": + case 1: + message.substate = 1; + break; + case "STALE_STATUS": + case 2: + message.substate = 2; + break; + } return message; }; /** - * Creates a plain object from a KerberosConfig message. Also converts values to other types if specified. + * Creates a plain object from a ClusterStatus message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.KerberosConfig + * @memberof google.cloud.dataproc.v1.ClusterStatus * @static - * @param {google.cloud.dataproc.v1.KerberosConfig} message KerberosConfig + * @param {google.cloud.dataproc.v1.ClusterStatus} message ClusterStatus * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - KerberosConfig.toObject = function toObject(message, options) { + ClusterStatus.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { - object.enableKerberos = false; - object.rootPrincipalPasswordUri = ""; - object.kmsKeyUri = ""; - object.keystoreUri = ""; - object.truststoreUri = ""; - object.keystorePasswordUri = ""; - object.keyPasswordUri = ""; - object.truststorePasswordUri = ""; - object.crossRealmTrustRealm = ""; - object.crossRealmTrustKdc = ""; - object.crossRealmTrustAdminServer = ""; - object.crossRealmTrustSharedPasswordUri = ""; - object.kdcDbKeyUri = ""; - object.tgtLifetimeHours = 0; - object.realm = ""; + object.state = options.enums === String ? "UNKNOWN" : 0; + object.detail = ""; + object.stateStartTime = null; + object.substate = options.enums === String ? 
"UNSPECIFIED" : 0; } - if (message.enableKerberos != null && message.hasOwnProperty("enableKerberos")) - object.enableKerberos = message.enableKerberos; - if (message.rootPrincipalPasswordUri != null && message.hasOwnProperty("rootPrincipalPasswordUri")) - object.rootPrincipalPasswordUri = message.rootPrincipalPasswordUri; - if (message.kmsKeyUri != null && message.hasOwnProperty("kmsKeyUri")) - object.kmsKeyUri = message.kmsKeyUri; - if (message.keystoreUri != null && message.hasOwnProperty("keystoreUri")) - object.keystoreUri = message.keystoreUri; - if (message.truststoreUri != null && message.hasOwnProperty("truststoreUri")) - object.truststoreUri = message.truststoreUri; - if (message.keystorePasswordUri != null && message.hasOwnProperty("keystorePasswordUri")) - object.keystorePasswordUri = message.keystorePasswordUri; - if (message.keyPasswordUri != null && message.hasOwnProperty("keyPasswordUri")) - object.keyPasswordUri = message.keyPasswordUri; - if (message.truststorePasswordUri != null && message.hasOwnProperty("truststorePasswordUri")) - object.truststorePasswordUri = message.truststorePasswordUri; - if (message.crossRealmTrustRealm != null && message.hasOwnProperty("crossRealmTrustRealm")) - object.crossRealmTrustRealm = message.crossRealmTrustRealm; - if (message.crossRealmTrustKdc != null && message.hasOwnProperty("crossRealmTrustKdc")) - object.crossRealmTrustKdc = message.crossRealmTrustKdc; - if (message.crossRealmTrustAdminServer != null && message.hasOwnProperty("crossRealmTrustAdminServer")) - object.crossRealmTrustAdminServer = message.crossRealmTrustAdminServer; - if (message.crossRealmTrustSharedPasswordUri != null && message.hasOwnProperty("crossRealmTrustSharedPasswordUri")) - object.crossRealmTrustSharedPasswordUri = message.crossRealmTrustSharedPasswordUri; - if (message.kdcDbKeyUri != null && message.hasOwnProperty("kdcDbKeyUri")) - object.kdcDbKeyUri = message.kdcDbKeyUri; - if (message.tgtLifetimeHours != null && 
message.hasOwnProperty("tgtLifetimeHours")) - object.tgtLifetimeHours = message.tgtLifetimeHours; - if (message.realm != null && message.hasOwnProperty("realm")) - object.realm = message.realm; + if (message.state != null && message.hasOwnProperty("state")) + object.state = options.enums === String ? $root.google.cloud.dataproc.v1.ClusterStatus.State[message.state] === undefined ? message.state : $root.google.cloud.dataproc.v1.ClusterStatus.State[message.state] : message.state; + if (message.detail != null && message.hasOwnProperty("detail")) + object.detail = message.detail; + if (message.stateStartTime != null && message.hasOwnProperty("stateStartTime")) + object.stateStartTime = $root.google.protobuf.Timestamp.toObject(message.stateStartTime, options); + if (message.substate != null && message.hasOwnProperty("substate")) + object.substate = options.enums === String ? $root.google.cloud.dataproc.v1.ClusterStatus.Substate[message.substate] === undefined ? message.substate : $root.google.cloud.dataproc.v1.ClusterStatus.Substate[message.substate] : message.substate; return object; }; /** - * Converts this KerberosConfig to JSON. + * Converts this ClusterStatus to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.KerberosConfig + * @memberof google.cloud.dataproc.v1.ClusterStatus * @instance * @returns {Object.} JSON object */ - KerberosConfig.prototype.toJSON = function toJSON() { + ClusterStatus.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for KerberosConfig + * Gets the default type url for ClusterStatus * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.KerberosConfig + * @memberof google.cloud.dataproc.v1.ClusterStatus * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - KerberosConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ClusterStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.KerberosConfig"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.ClusterStatus"; }; - return KerberosConfig; - })(); - - v1.IdentityConfig = (function() { - /** - * Properties of an IdentityConfig. - * @memberof google.cloud.dataproc.v1 - * @interface IIdentityConfig - * @property {Object.|null} [userServiceAccountMapping] IdentityConfig userServiceAccountMapping + * State enum. 
+ * @name google.cloud.dataproc.v1.ClusterStatus.State + * @enum {number} + * @property {number} UNKNOWN=0 UNKNOWN value + * @property {number} CREATING=1 CREATING value + * @property {number} RUNNING=2 RUNNING value + * @property {number} ERROR=3 ERROR value + * @property {number} ERROR_DUE_TO_UPDATE=9 ERROR_DUE_TO_UPDATE value + * @property {number} DELETING=4 DELETING value + * @property {number} UPDATING=5 UPDATING value + * @property {number} STOPPING=6 STOPPING value + * @property {number} STOPPED=7 STOPPED value + * @property {number} STARTING=8 STARTING value */ - + ClusterStatus.State = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNKNOWN"] = 0; + values[valuesById[1] = "CREATING"] = 1; + values[valuesById[2] = "RUNNING"] = 2; + values[valuesById[3] = "ERROR"] = 3; + values[valuesById[9] = "ERROR_DUE_TO_UPDATE"] = 9; + values[valuesById[4] = "DELETING"] = 4; + values[valuesById[5] = "UPDATING"] = 5; + values[valuesById[6] = "STOPPING"] = 6; + values[valuesById[7] = "STOPPED"] = 7; + values[valuesById[8] = "STARTING"] = 8; + return values; + })(); + /** - * Constructs a new IdentityConfig. + * Substate enum. + * @name google.cloud.dataproc.v1.ClusterStatus.Substate + * @enum {number} + * @property {number} UNSPECIFIED=0 UNSPECIFIED value + * @property {number} UNHEALTHY=1 UNHEALTHY value + * @property {number} STALE_STATUS=2 STALE_STATUS value + */ + ClusterStatus.Substate = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNSPECIFIED"] = 0; + values[valuesById[1] = "UNHEALTHY"] = 1; + values[valuesById[2] = "STALE_STATUS"] = 2; + return values; + })(); + + return ClusterStatus; + })(); + + v1.SecurityConfig = (function() { + + /** + * Properties of a SecurityConfig. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents an IdentityConfig. 
- * @implements IIdentityConfig + * @interface ISecurityConfig + * @property {google.cloud.dataproc.v1.IKerberosConfig|null} [kerberosConfig] SecurityConfig kerberosConfig + * @property {google.cloud.dataproc.v1.IIdentityConfig|null} [identityConfig] SecurityConfig identityConfig + */ + + /** + * Constructs a new SecurityConfig. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a SecurityConfig. + * @implements ISecurityConfig * @constructor - * @param {google.cloud.dataproc.v1.IIdentityConfig=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.ISecurityConfig=} [properties] Properties to set */ - function IdentityConfig(properties) { - this.userServiceAccountMapping = {}; + function SecurityConfig(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -17012,95 +16898,89 @@ } /** - * IdentityConfig userServiceAccountMapping. - * @member {Object.} userServiceAccountMapping - * @memberof google.cloud.dataproc.v1.IdentityConfig + * SecurityConfig kerberosConfig. + * @member {google.cloud.dataproc.v1.IKerberosConfig|null|undefined} kerberosConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @instance */ - IdentityConfig.prototype.userServiceAccountMapping = $util.emptyObject; + SecurityConfig.prototype.kerberosConfig = null; /** - * Creates a new IdentityConfig instance using the specified properties. + * SecurityConfig identityConfig. + * @member {google.cloud.dataproc.v1.IIdentityConfig|null|undefined} identityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig + * @instance + */ + SecurityConfig.prototype.identityConfig = null; + + /** + * Creates a new SecurityConfig instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.IdentityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @static - * @param {google.cloud.dataproc.v1.IIdentityConfig=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.IdentityConfig} IdentityConfig instance + * @param {google.cloud.dataproc.v1.ISecurityConfig=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.SecurityConfig} SecurityConfig instance */ - IdentityConfig.create = function create(properties) { - return new IdentityConfig(properties); + SecurityConfig.create = function create(properties) { + return new SecurityConfig(properties); }; /** - * Encodes the specified IdentityConfig message. Does not implicitly {@link google.cloud.dataproc.v1.IdentityConfig.verify|verify} messages. + * Encodes the specified SecurityConfig message. Does not implicitly {@link google.cloud.dataproc.v1.SecurityConfig.verify|verify} messages. * @function encode - * @memberof google.cloud.dataproc.v1.IdentityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @static - * @param {google.cloud.dataproc.v1.IIdentityConfig} message IdentityConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.ISecurityConfig} message SecurityConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - IdentityConfig.encode = function encode(message, writer) { + SecurityConfig.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.userServiceAccountMapping != null && Object.hasOwnProperty.call(message, "userServiceAccountMapping")) - for (var keys = Object.keys(message.userServiceAccountMapping), i = 0; i < keys.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.userServiceAccountMapping[keys[i]]).ldelim(); + if 
(message.kerberosConfig != null && Object.hasOwnProperty.call(message, "kerberosConfig")) + $root.google.cloud.dataproc.v1.KerberosConfig.encode(message.kerberosConfig, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.identityConfig != null && Object.hasOwnProperty.call(message, "identityConfig")) + $root.google.cloud.dataproc.v1.IdentityConfig.encode(message.identityConfig, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified IdentityConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.IdentityConfig.verify|verify} messages. + * Encodes the specified SecurityConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.SecurityConfig.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.IdentityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @static - * @param {google.cloud.dataproc.v1.IIdentityConfig} message IdentityConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.ISecurityConfig} message SecurityConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - IdentityConfig.encodeDelimited = function encodeDelimited(message, writer) { + SecurityConfig.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an IdentityConfig message from the specified reader or buffer. + * Decodes a SecurityConfig message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.IdentityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.IdentityConfig} IdentityConfig + * @returns {google.cloud.dataproc.v1.SecurityConfig} SecurityConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - IdentityConfig.decode = function decode(reader, length) { + SecurityConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.IdentityConfig(), key, value; + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.SecurityConfig(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (message.userServiceAccountMapping === $util.emptyObject) - message.userServiceAccountMapping = {}; - var end2 = reader.uint32() + reader.pos; - key = ""; - value = ""; - while (reader.pos < end2) { - var tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = reader.string(); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.userServiceAccountMapping[key] = value; + message.kerberosConfig = $root.google.cloud.dataproc.v1.KerberosConfig.decode(reader, reader.uint32()); + break; + } + case 2: { + message.identityConfig = $root.google.cloud.dataproc.v1.IdentityConfig.decode(reader, reader.uint32()); break; } default: @@ -17112,140 +16992,155 @@ }; /** - * Decodes an IdentityConfig message from the specified reader or buffer, length delimited. 
+ * Decodes a SecurityConfig message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.IdentityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.IdentityConfig} IdentityConfig + * @returns {google.cloud.dataproc.v1.SecurityConfig} SecurityConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - IdentityConfig.decodeDelimited = function decodeDelimited(reader) { + SecurityConfig.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an IdentityConfig message. + * Verifies a SecurityConfig message. * @function verify - * @memberof google.cloud.dataproc.v1.IdentityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - IdentityConfig.verify = function verify(message) { + SecurityConfig.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.userServiceAccountMapping != null && message.hasOwnProperty("userServiceAccountMapping")) { - if (!$util.isObject(message.userServiceAccountMapping)) - return "userServiceAccountMapping: object expected"; - var key = Object.keys(message.userServiceAccountMapping); - for (var i = 0; i < key.length; ++i) - if (!$util.isString(message.userServiceAccountMapping[key[i]])) - return "userServiceAccountMapping: string{k:string} expected"; + if (message.kerberosConfig != null && message.hasOwnProperty("kerberosConfig")) { + var error = 
$root.google.cloud.dataproc.v1.KerberosConfig.verify(message.kerberosConfig); + if (error) + return "kerberosConfig." + error; + } + if (message.identityConfig != null && message.hasOwnProperty("identityConfig")) { + var error = $root.google.cloud.dataproc.v1.IdentityConfig.verify(message.identityConfig); + if (error) + return "identityConfig." + error; } return null; }; /** - * Creates an IdentityConfig message from a plain object. Also converts values to their respective internal types. + * Creates a SecurityConfig message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.IdentityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.IdentityConfig} IdentityConfig + * @returns {google.cloud.dataproc.v1.SecurityConfig} SecurityConfig */ - IdentityConfig.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.IdentityConfig) + SecurityConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.SecurityConfig) return object; - var message = new $root.google.cloud.dataproc.v1.IdentityConfig(); - if (object.userServiceAccountMapping) { - if (typeof object.userServiceAccountMapping !== "object") - throw TypeError(".google.cloud.dataproc.v1.IdentityConfig.userServiceAccountMapping: object expected"); - message.userServiceAccountMapping = {}; - for (var keys = Object.keys(object.userServiceAccountMapping), i = 0; i < keys.length; ++i) - message.userServiceAccountMapping[keys[i]] = String(object.userServiceAccountMapping[keys[i]]); + var message = new $root.google.cloud.dataproc.v1.SecurityConfig(); + if (object.kerberosConfig != null) { + if (typeof object.kerberosConfig !== "object") + throw TypeError(".google.cloud.dataproc.v1.SecurityConfig.kerberosConfig: object expected"); + message.kerberosConfig = 
$root.google.cloud.dataproc.v1.KerberosConfig.fromObject(object.kerberosConfig); + } + if (object.identityConfig != null) { + if (typeof object.identityConfig !== "object") + throw TypeError(".google.cloud.dataproc.v1.SecurityConfig.identityConfig: object expected"); + message.identityConfig = $root.google.cloud.dataproc.v1.IdentityConfig.fromObject(object.identityConfig); } return message; }; /** - * Creates a plain object from an IdentityConfig message. Also converts values to other types if specified. + * Creates a plain object from a SecurityConfig message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.IdentityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @static - * @param {google.cloud.dataproc.v1.IdentityConfig} message IdentityConfig + * @param {google.cloud.dataproc.v1.SecurityConfig} message SecurityConfig * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - IdentityConfig.toObject = function toObject(message, options) { + SecurityConfig.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.objects || options.defaults) - object.userServiceAccountMapping = {}; - var keys2; - if (message.userServiceAccountMapping && (keys2 = Object.keys(message.userServiceAccountMapping)).length) { - object.userServiceAccountMapping = {}; - for (var j = 0; j < keys2.length; ++j) - object.userServiceAccountMapping[keys2[j]] = message.userServiceAccountMapping[keys2[j]]; + if (options.defaults) { + object.kerberosConfig = null; + object.identityConfig = null; } + if (message.kerberosConfig != null && message.hasOwnProperty("kerberosConfig")) + object.kerberosConfig = $root.google.cloud.dataproc.v1.KerberosConfig.toObject(message.kerberosConfig, options); + if (message.identityConfig != null && message.hasOwnProperty("identityConfig")) + object.identityConfig = 
$root.google.cloud.dataproc.v1.IdentityConfig.toObject(message.identityConfig, options); return object; }; /** - * Converts this IdentityConfig to JSON. + * Converts this SecurityConfig to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.IdentityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @instance * @returns {Object.} JSON object */ - IdentityConfig.prototype.toJSON = function toJSON() { + SecurityConfig.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for IdentityConfig + * Gets the default type url for SecurityConfig * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.IdentityConfig + * @memberof google.cloud.dataproc.v1.SecurityConfig * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - IdentityConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SecurityConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.IdentityConfig"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.SecurityConfig"; }; - return IdentityConfig; + return SecurityConfig; })(); - v1.SoftwareConfig = (function() { - - /** - * Properties of a SoftwareConfig. - * @memberof google.cloud.dataproc.v1 - * @interface ISoftwareConfig - * @property {string|null} [imageVersion] SoftwareConfig imageVersion - * @property {Object.|null} [properties] SoftwareConfig properties - * @property {Array.|null} [optionalComponents] SoftwareConfig optionalComponents - */ + v1.KerberosConfig = (function() { /** - * Constructs a new SoftwareConfig. + * Properties of a KerberosConfig. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a SoftwareConfig. 
- * @implements ISoftwareConfig - * @constructor - * @param {google.cloud.dataproc.v1.ISoftwareConfig=} [properties] Properties to set - */ - function SoftwareConfig(properties) { - this.properties = {}; - this.optionalComponents = []; + * @interface IKerberosConfig + * @property {boolean|null} [enableKerberos] KerberosConfig enableKerberos + * @property {string|null} [rootPrincipalPasswordUri] KerberosConfig rootPrincipalPasswordUri + * @property {string|null} [kmsKeyUri] KerberosConfig kmsKeyUri + * @property {string|null} [keystoreUri] KerberosConfig keystoreUri + * @property {string|null} [truststoreUri] KerberosConfig truststoreUri + * @property {string|null} [keystorePasswordUri] KerberosConfig keystorePasswordUri + * @property {string|null} [keyPasswordUri] KerberosConfig keyPasswordUri + * @property {string|null} [truststorePasswordUri] KerberosConfig truststorePasswordUri + * @property {string|null} [crossRealmTrustRealm] KerberosConfig crossRealmTrustRealm + * @property {string|null} [crossRealmTrustKdc] KerberosConfig crossRealmTrustKdc + * @property {string|null} [crossRealmTrustAdminServer] KerberosConfig crossRealmTrustAdminServer + * @property {string|null} [crossRealmTrustSharedPasswordUri] KerberosConfig crossRealmTrustSharedPasswordUri + * @property {string|null} [kdcDbKeyUri] KerberosConfig kdcDbKeyUri + * @property {number|null} [tgtLifetimeHours] KerberosConfig tgtLifetimeHours + * @property {string|null} [realm] KerberosConfig realm + */ + + /** + * Constructs a new KerberosConfig. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a KerberosConfig. + * @implements IKerberosConfig + * @constructor + * @param {google.cloud.dataproc.v1.IKerberosConfig=} [properties] Properties to set + */ + function KerberosConfig(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -17253,134 +17148,271 @@ } /** - * SoftwareConfig imageVersion. 
- * @member {string} imageVersion - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * KerberosConfig enableKerberos. + * @member {boolean} enableKerberos + * @memberof google.cloud.dataproc.v1.KerberosConfig * @instance */ - SoftwareConfig.prototype.imageVersion = ""; + KerberosConfig.prototype.enableKerberos = false; /** - * SoftwareConfig properties. - * @member {Object.} properties - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * KerberosConfig rootPrincipalPasswordUri. + * @member {string} rootPrincipalPasswordUri + * @memberof google.cloud.dataproc.v1.KerberosConfig * @instance */ - SoftwareConfig.prototype.properties = $util.emptyObject; + KerberosConfig.prototype.rootPrincipalPasswordUri = ""; /** - * SoftwareConfig optionalComponents. - * @member {Array.} optionalComponents - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * KerberosConfig kmsKeyUri. + * @member {string} kmsKeyUri + * @memberof google.cloud.dataproc.v1.KerberosConfig * @instance */ - SoftwareConfig.prototype.optionalComponents = $util.emptyArray; + KerberosConfig.prototype.kmsKeyUri = ""; /** - * Creates a new SoftwareConfig instance using the specified properties. + * KerberosConfig keystoreUri. + * @member {string} keystoreUri + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.keystoreUri = ""; + + /** + * KerberosConfig truststoreUri. + * @member {string} truststoreUri + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.truststoreUri = ""; + + /** + * KerberosConfig keystorePasswordUri. + * @member {string} keystorePasswordUri + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.keystorePasswordUri = ""; + + /** + * KerberosConfig keyPasswordUri. 
+ * @member {string} keyPasswordUri + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.keyPasswordUri = ""; + + /** + * KerberosConfig truststorePasswordUri. + * @member {string} truststorePasswordUri + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.truststorePasswordUri = ""; + + /** + * KerberosConfig crossRealmTrustRealm. + * @member {string} crossRealmTrustRealm + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.crossRealmTrustRealm = ""; + + /** + * KerberosConfig crossRealmTrustKdc. + * @member {string} crossRealmTrustKdc + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.crossRealmTrustKdc = ""; + + /** + * KerberosConfig crossRealmTrustAdminServer. + * @member {string} crossRealmTrustAdminServer + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.crossRealmTrustAdminServer = ""; + + /** + * KerberosConfig crossRealmTrustSharedPasswordUri. + * @member {string} crossRealmTrustSharedPasswordUri + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.crossRealmTrustSharedPasswordUri = ""; + + /** + * KerberosConfig kdcDbKeyUri. + * @member {string} kdcDbKeyUri + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.kdcDbKeyUri = ""; + + /** + * KerberosConfig tgtLifetimeHours. + * @member {number} tgtLifetimeHours + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.tgtLifetimeHours = 0; + + /** + * KerberosConfig realm. + * @member {string} realm + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @instance + */ + KerberosConfig.prototype.realm = ""; + + /** + * Creates a new KerberosConfig instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @memberof google.cloud.dataproc.v1.KerberosConfig * @static - * @param {google.cloud.dataproc.v1.ISoftwareConfig=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.SoftwareConfig} SoftwareConfig instance + * @param {google.cloud.dataproc.v1.IKerberosConfig=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.KerberosConfig} KerberosConfig instance */ - SoftwareConfig.create = function create(properties) { - return new SoftwareConfig(properties); + KerberosConfig.create = function create(properties) { + return new KerberosConfig(properties); }; /** - * Encodes the specified SoftwareConfig message. Does not implicitly {@link google.cloud.dataproc.v1.SoftwareConfig.verify|verify} messages. + * Encodes the specified KerberosConfig message. Does not implicitly {@link google.cloud.dataproc.v1.KerberosConfig.verify|verify} messages. * @function encode - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @memberof google.cloud.dataproc.v1.KerberosConfig * @static - * @param {google.cloud.dataproc.v1.ISoftwareConfig} message SoftwareConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.IKerberosConfig} message KerberosConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SoftwareConfig.encode = function encode(message, writer) { + KerberosConfig.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.imageVersion != null && Object.hasOwnProperty.call(message, "imageVersion")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.imageVersion); - if (message.properties != null && Object.hasOwnProperty.call(message, "properties")) - for (var keys = Object.keys(message.properties), i = 0; i < keys.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 
=*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.properties[keys[i]]).ldelim(); - if (message.optionalComponents != null && message.optionalComponents.length) { - writer.uint32(/* id 3, wireType 2 =*/26).fork(); - for (var i = 0; i < message.optionalComponents.length; ++i) - writer.int32(message.optionalComponents[i]); - writer.ldelim(); - } + if (message.enableKerberos != null && Object.hasOwnProperty.call(message, "enableKerberos")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.enableKerberos); + if (message.rootPrincipalPasswordUri != null && Object.hasOwnProperty.call(message, "rootPrincipalPasswordUri")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.rootPrincipalPasswordUri); + if (message.kmsKeyUri != null && Object.hasOwnProperty.call(message, "kmsKeyUri")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.kmsKeyUri); + if (message.keystoreUri != null && Object.hasOwnProperty.call(message, "keystoreUri")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.keystoreUri); + if (message.truststoreUri != null && Object.hasOwnProperty.call(message, "truststoreUri")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.truststoreUri); + if (message.keystorePasswordUri != null && Object.hasOwnProperty.call(message, "keystorePasswordUri")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.keystorePasswordUri); + if (message.keyPasswordUri != null && Object.hasOwnProperty.call(message, "keyPasswordUri")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.keyPasswordUri); + if (message.truststorePasswordUri != null && Object.hasOwnProperty.call(message, "truststorePasswordUri")) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.truststorePasswordUri); + if (message.crossRealmTrustRealm != null && Object.hasOwnProperty.call(message, "crossRealmTrustRealm")) + writer.uint32(/* id 9, wireType 2 =*/74).string(message.crossRealmTrustRealm); + if (message.crossRealmTrustKdc != null && 
Object.hasOwnProperty.call(message, "crossRealmTrustKdc")) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.crossRealmTrustKdc); + if (message.crossRealmTrustAdminServer != null && Object.hasOwnProperty.call(message, "crossRealmTrustAdminServer")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.crossRealmTrustAdminServer); + if (message.crossRealmTrustSharedPasswordUri != null && Object.hasOwnProperty.call(message, "crossRealmTrustSharedPasswordUri")) + writer.uint32(/* id 12, wireType 2 =*/98).string(message.crossRealmTrustSharedPasswordUri); + if (message.kdcDbKeyUri != null && Object.hasOwnProperty.call(message, "kdcDbKeyUri")) + writer.uint32(/* id 13, wireType 2 =*/106).string(message.kdcDbKeyUri); + if (message.tgtLifetimeHours != null && Object.hasOwnProperty.call(message, "tgtLifetimeHours")) + writer.uint32(/* id 14, wireType 0 =*/112).int32(message.tgtLifetimeHours); + if (message.realm != null && Object.hasOwnProperty.call(message, "realm")) + writer.uint32(/* id 15, wireType 2 =*/122).string(message.realm); return writer; }; /** - * Encodes the specified SoftwareConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.SoftwareConfig.verify|verify} messages. + * Encodes the specified KerberosConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.KerberosConfig.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @memberof google.cloud.dataproc.v1.KerberosConfig * @static - * @param {google.cloud.dataproc.v1.ISoftwareConfig} message SoftwareConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.IKerberosConfig} message KerberosConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SoftwareConfig.encodeDelimited = function encodeDelimited(message, writer) { + KerberosConfig.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SoftwareConfig message from the specified reader or buffer. + * Decodes a KerberosConfig message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @memberof google.cloud.dataproc.v1.KerberosConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.SoftwareConfig} SoftwareConfig + * @returns {google.cloud.dataproc.v1.KerberosConfig} KerberosConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SoftwareConfig.decode = function decode(reader, length) { + KerberosConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.SoftwareConfig(), key, value; + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.KerberosConfig(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.imageVersion = reader.string(); + message.enableKerberos = reader.bool(); break; } case 2: { - if (message.properties === $util.emptyObject) - message.properties = {}; - var end2 = reader.uint32() + reader.pos; - key = ""; - value = ""; - while (reader.pos < end2) { - var tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = reader.string(); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.properties[key] = value; + message.rootPrincipalPasswordUri = reader.string(); break; } case 3: { - if (!(message.optionalComponents && message.optionalComponents.length)) - message.optionalComponents = []; - if ((tag & 7) === 2) { - var end2 = reader.uint32() + reader.pos; - while (reader.pos < end2) - message.optionalComponents.push(reader.int32()); - } else - message.optionalComponents.push(reader.int32()); + message.kmsKeyUri = reader.string(); + break; + } + case 4: { + message.keystoreUri = reader.string(); + break; + } + case 5: { + message.truststoreUri = reader.string(); + break; + } + case 6: { + message.keystorePasswordUri = reader.string(); + break; + } + case 7: { + message.keyPasswordUri = reader.string(); + break; + } + case 8: { + message.truststorePasswordUri = reader.string(); + break; + } + case 9: { + message.crossRealmTrustRealm = reader.string(); + break; + } + case 10: { + message.crossRealmTrustKdc = reader.string(); + break; + } + case 11: { + message.crossRealmTrustAdminServer = reader.string(); + break; + } + case 12: { + message.crossRealmTrustSharedPasswordUri = reader.string(); + break; + } + case 13: { + message.kdcDbKeyUri = reader.string(); + break; + } + case 14: { + message.tgtLifetimeHours = reader.int32(); + break; + } + case 15: { + message.realm = reader.string(); 
break; } default: @@ -17392,243 +17424,236 @@ }; /** - * Decodes a SoftwareConfig message from the specified reader or buffer, length delimited. + * Decodes a KerberosConfig message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @memberof google.cloud.dataproc.v1.KerberosConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.SoftwareConfig} SoftwareConfig + * @returns {google.cloud.dataproc.v1.KerberosConfig} KerberosConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SoftwareConfig.decodeDelimited = function decodeDelimited(reader) { + KerberosConfig.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SoftwareConfig message. + * Verifies a KerberosConfig message. 
* @function verify - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @memberof google.cloud.dataproc.v1.KerberosConfig * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SoftwareConfig.verify = function verify(message) { + KerberosConfig.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.imageVersion != null && message.hasOwnProperty("imageVersion")) - if (!$util.isString(message.imageVersion)) - return "imageVersion: string expected"; - if (message.properties != null && message.hasOwnProperty("properties")) { - if (!$util.isObject(message.properties)) - return "properties: object expected"; - var key = Object.keys(message.properties); - for (var i = 0; i < key.length; ++i) - if (!$util.isString(message.properties[key[i]])) - return "properties: string{k:string} expected"; - } - if (message.optionalComponents != null && message.hasOwnProperty("optionalComponents")) { - if (!Array.isArray(message.optionalComponents)) - return "optionalComponents: array expected"; - for (var i = 0; i < message.optionalComponents.length; ++i) - switch (message.optionalComponents[i]) { - default: - return "optionalComponents: enum value[] expected"; - case 0: - case 5: - case 13: - case 9: - case 14: - case 11: - case 3: - case 1: - case 6: - case 12: - case 10: - case 4: - case 8: - break; - } - } + if (message.enableKerberos != null && message.hasOwnProperty("enableKerberos")) + if (typeof message.enableKerberos !== "boolean") + return "enableKerberos: boolean expected"; + if (message.rootPrincipalPasswordUri != null && message.hasOwnProperty("rootPrincipalPasswordUri")) + if (!$util.isString(message.rootPrincipalPasswordUri)) + return "rootPrincipalPasswordUri: string expected"; + if (message.kmsKeyUri != null && message.hasOwnProperty("kmsKeyUri")) + if (!$util.isString(message.kmsKeyUri)) + return 
"kmsKeyUri: string expected"; + if (message.keystoreUri != null && message.hasOwnProperty("keystoreUri")) + if (!$util.isString(message.keystoreUri)) + return "keystoreUri: string expected"; + if (message.truststoreUri != null && message.hasOwnProperty("truststoreUri")) + if (!$util.isString(message.truststoreUri)) + return "truststoreUri: string expected"; + if (message.keystorePasswordUri != null && message.hasOwnProperty("keystorePasswordUri")) + if (!$util.isString(message.keystorePasswordUri)) + return "keystorePasswordUri: string expected"; + if (message.keyPasswordUri != null && message.hasOwnProperty("keyPasswordUri")) + if (!$util.isString(message.keyPasswordUri)) + return "keyPasswordUri: string expected"; + if (message.truststorePasswordUri != null && message.hasOwnProperty("truststorePasswordUri")) + if (!$util.isString(message.truststorePasswordUri)) + return "truststorePasswordUri: string expected"; + if (message.crossRealmTrustRealm != null && message.hasOwnProperty("crossRealmTrustRealm")) + if (!$util.isString(message.crossRealmTrustRealm)) + return "crossRealmTrustRealm: string expected"; + if (message.crossRealmTrustKdc != null && message.hasOwnProperty("crossRealmTrustKdc")) + if (!$util.isString(message.crossRealmTrustKdc)) + return "crossRealmTrustKdc: string expected"; + if (message.crossRealmTrustAdminServer != null && message.hasOwnProperty("crossRealmTrustAdminServer")) + if (!$util.isString(message.crossRealmTrustAdminServer)) + return "crossRealmTrustAdminServer: string expected"; + if (message.crossRealmTrustSharedPasswordUri != null && message.hasOwnProperty("crossRealmTrustSharedPasswordUri")) + if (!$util.isString(message.crossRealmTrustSharedPasswordUri)) + return "crossRealmTrustSharedPasswordUri: string expected"; + if (message.kdcDbKeyUri != null && message.hasOwnProperty("kdcDbKeyUri")) + if (!$util.isString(message.kdcDbKeyUri)) + return "kdcDbKeyUri: string expected"; + if (message.tgtLifetimeHours != null && 
message.hasOwnProperty("tgtLifetimeHours")) + if (!$util.isInteger(message.tgtLifetimeHours)) + return "tgtLifetimeHours: integer expected"; + if (message.realm != null && message.hasOwnProperty("realm")) + if (!$util.isString(message.realm)) + return "realm: string expected"; return null; }; /** - * Creates a SoftwareConfig message from a plain object. Also converts values to their respective internal types. + * Creates a KerberosConfig message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @memberof google.cloud.dataproc.v1.KerberosConfig * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.SoftwareConfig} SoftwareConfig + * @returns {google.cloud.dataproc.v1.KerberosConfig} KerberosConfig */ - SoftwareConfig.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.SoftwareConfig) + KerberosConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.KerberosConfig) return object; - var message = new $root.google.cloud.dataproc.v1.SoftwareConfig(); - if (object.imageVersion != null) - message.imageVersion = String(object.imageVersion); - if (object.properties) { - if (typeof object.properties !== "object") - throw TypeError(".google.cloud.dataproc.v1.SoftwareConfig.properties: object expected"); - message.properties = {}; - for (var keys = Object.keys(object.properties), i = 0; i < keys.length; ++i) - message.properties[keys[i]] = String(object.properties[keys[i]]); - } - if (object.optionalComponents) { - if (!Array.isArray(object.optionalComponents)) - throw TypeError(".google.cloud.dataproc.v1.SoftwareConfig.optionalComponents: array expected"); - message.optionalComponents = []; - for (var i = 0; i < object.optionalComponents.length; ++i) - switch (object.optionalComponents[i]) { - default: - if (typeof object.optionalComponents[i] 
=== "number") { - message.optionalComponents[i] = object.optionalComponents[i]; - break; - } - case "COMPONENT_UNSPECIFIED": - case 0: - message.optionalComponents[i] = 0; - break; - case "ANACONDA": - case 5: - message.optionalComponents[i] = 5; - break; - case "DOCKER": - case 13: - message.optionalComponents[i] = 13; - break; - case "DRUID": - case 9: - message.optionalComponents[i] = 9; - break; - case "FLINK": - case 14: - message.optionalComponents[i] = 14; - break; - case "HBASE": - case 11: - message.optionalComponents[i] = 11; - break; - case "HIVE_WEBHCAT": - case 3: - message.optionalComponents[i] = 3; - break; - case "JUPYTER": - case 1: - message.optionalComponents[i] = 1; - break; - case "PRESTO": - case 6: - message.optionalComponents[i] = 6; - break; - case "RANGER": - case 12: - message.optionalComponents[i] = 12; - break; - case "SOLR": - case 10: - message.optionalComponents[i] = 10; - break; - case "ZEPPELIN": - case 4: - message.optionalComponents[i] = 4; - break; - case "ZOOKEEPER": - case 8: - message.optionalComponents[i] = 8; - break; - } - } - return message; - }; - - /** - * Creates a plain object from a SoftwareConfig message. Also converts values to other types if specified. 
- * @function toObject - * @memberof google.cloud.dataproc.v1.SoftwareConfig - * @static - * @param {google.cloud.dataproc.v1.SoftwareConfig} message SoftwareConfig - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - SoftwareConfig.toObject = function toObject(message, options) { - if (!options) - options = {}; - var object = {}; - if (options.arrays || options.defaults) - object.optionalComponents = []; - if (options.objects || options.defaults) - object.properties = {}; - if (options.defaults) - object.imageVersion = ""; - if (message.imageVersion != null && message.hasOwnProperty("imageVersion")) - object.imageVersion = message.imageVersion; - var keys2; - if (message.properties && (keys2 = Object.keys(message.properties)).length) { - object.properties = {}; - for (var j = 0; j < keys2.length; ++j) - object.properties[keys2[j]] = message.properties[keys2[j]]; - } - if (message.optionalComponents && message.optionalComponents.length) { - object.optionalComponents = []; - for (var j = 0; j < message.optionalComponents.length; ++j) - object.optionalComponents[j] = options.enums === String ? $root.google.cloud.dataproc.v1.Component[message.optionalComponents[j]] === undefined ? 
message.optionalComponents[j] : $root.google.cloud.dataproc.v1.Component[message.optionalComponents[j]] : message.optionalComponents[j]; + var message = new $root.google.cloud.dataproc.v1.KerberosConfig(); + if (object.enableKerberos != null) + message.enableKerberos = Boolean(object.enableKerberos); + if (object.rootPrincipalPasswordUri != null) + message.rootPrincipalPasswordUri = String(object.rootPrincipalPasswordUri); + if (object.kmsKeyUri != null) + message.kmsKeyUri = String(object.kmsKeyUri); + if (object.keystoreUri != null) + message.keystoreUri = String(object.keystoreUri); + if (object.truststoreUri != null) + message.truststoreUri = String(object.truststoreUri); + if (object.keystorePasswordUri != null) + message.keystorePasswordUri = String(object.keystorePasswordUri); + if (object.keyPasswordUri != null) + message.keyPasswordUri = String(object.keyPasswordUri); + if (object.truststorePasswordUri != null) + message.truststorePasswordUri = String(object.truststorePasswordUri); + if (object.crossRealmTrustRealm != null) + message.crossRealmTrustRealm = String(object.crossRealmTrustRealm); + if (object.crossRealmTrustKdc != null) + message.crossRealmTrustKdc = String(object.crossRealmTrustKdc); + if (object.crossRealmTrustAdminServer != null) + message.crossRealmTrustAdminServer = String(object.crossRealmTrustAdminServer); + if (object.crossRealmTrustSharedPasswordUri != null) + message.crossRealmTrustSharedPasswordUri = String(object.crossRealmTrustSharedPasswordUri); + if (object.kdcDbKeyUri != null) + message.kdcDbKeyUri = String(object.kdcDbKeyUri); + if (object.tgtLifetimeHours != null) + message.tgtLifetimeHours = object.tgtLifetimeHours | 0; + if (object.realm != null) + message.realm = String(object.realm); + return message; + }; + + /** + * Creates a plain object from a KerberosConfig message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.dataproc.v1.KerberosConfig + * @static + * @param {google.cloud.dataproc.v1.KerberosConfig} message KerberosConfig + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + KerberosConfig.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.enableKerberos = false; + object.rootPrincipalPasswordUri = ""; + object.kmsKeyUri = ""; + object.keystoreUri = ""; + object.truststoreUri = ""; + object.keystorePasswordUri = ""; + object.keyPasswordUri = ""; + object.truststorePasswordUri = ""; + object.crossRealmTrustRealm = ""; + object.crossRealmTrustKdc = ""; + object.crossRealmTrustAdminServer = ""; + object.crossRealmTrustSharedPasswordUri = ""; + object.kdcDbKeyUri = ""; + object.tgtLifetimeHours = 0; + object.realm = ""; } + if (message.enableKerberos != null && message.hasOwnProperty("enableKerberos")) + object.enableKerberos = message.enableKerberos; + if (message.rootPrincipalPasswordUri != null && message.hasOwnProperty("rootPrincipalPasswordUri")) + object.rootPrincipalPasswordUri = message.rootPrincipalPasswordUri; + if (message.kmsKeyUri != null && message.hasOwnProperty("kmsKeyUri")) + object.kmsKeyUri = message.kmsKeyUri; + if (message.keystoreUri != null && message.hasOwnProperty("keystoreUri")) + object.keystoreUri = message.keystoreUri; + if (message.truststoreUri != null && message.hasOwnProperty("truststoreUri")) + object.truststoreUri = message.truststoreUri; + if (message.keystorePasswordUri != null && message.hasOwnProperty("keystorePasswordUri")) + object.keystorePasswordUri = message.keystorePasswordUri; + if (message.keyPasswordUri != null && message.hasOwnProperty("keyPasswordUri")) + object.keyPasswordUri = message.keyPasswordUri; + if (message.truststorePasswordUri != null && message.hasOwnProperty("truststorePasswordUri")) + object.truststorePasswordUri = 
message.truststorePasswordUri; + if (message.crossRealmTrustRealm != null && message.hasOwnProperty("crossRealmTrustRealm")) + object.crossRealmTrustRealm = message.crossRealmTrustRealm; + if (message.crossRealmTrustKdc != null && message.hasOwnProperty("crossRealmTrustKdc")) + object.crossRealmTrustKdc = message.crossRealmTrustKdc; + if (message.crossRealmTrustAdminServer != null && message.hasOwnProperty("crossRealmTrustAdminServer")) + object.crossRealmTrustAdminServer = message.crossRealmTrustAdminServer; + if (message.crossRealmTrustSharedPasswordUri != null && message.hasOwnProperty("crossRealmTrustSharedPasswordUri")) + object.crossRealmTrustSharedPasswordUri = message.crossRealmTrustSharedPasswordUri; + if (message.kdcDbKeyUri != null && message.hasOwnProperty("kdcDbKeyUri")) + object.kdcDbKeyUri = message.kdcDbKeyUri; + if (message.tgtLifetimeHours != null && message.hasOwnProperty("tgtLifetimeHours")) + object.tgtLifetimeHours = message.tgtLifetimeHours; + if (message.realm != null && message.hasOwnProperty("realm")) + object.realm = message.realm; return object; }; /** - * Converts this SoftwareConfig to JSON. + * Converts this KerberosConfig to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @memberof google.cloud.dataproc.v1.KerberosConfig * @instance * @returns {Object.} JSON object */ - SoftwareConfig.prototype.toJSON = function toJSON() { + KerberosConfig.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SoftwareConfig + * Gets the default type url for KerberosConfig * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @memberof google.cloud.dataproc.v1.KerberosConfig * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SoftwareConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + KerberosConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.SoftwareConfig"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.KerberosConfig"; }; - return SoftwareConfig; + return KerberosConfig; })(); - v1.LifecycleConfig = (function() { + v1.IdentityConfig = (function() { /** - * Properties of a LifecycleConfig. + * Properties of an IdentityConfig. * @memberof google.cloud.dataproc.v1 - * @interface ILifecycleConfig - * @property {google.protobuf.IDuration|null} [idleDeleteTtl] LifecycleConfig idleDeleteTtl - * @property {google.protobuf.ITimestamp|null} [autoDeleteTime] LifecycleConfig autoDeleteTime - * @property {google.protobuf.IDuration|null} [autoDeleteTtl] LifecycleConfig autoDeleteTtl - * @property {google.protobuf.ITimestamp|null} [idleStartTime] LifecycleConfig idleStartTime + * @interface IIdentityConfig + * @property {Object.|null} [userServiceAccountMapping] IdentityConfig userServiceAccountMapping */ /** - * Constructs a new LifecycleConfig. + * Constructs a new IdentityConfig. 
* @memberof google.cloud.dataproc.v1 - * @classdesc Represents a LifecycleConfig. - * @implements ILifecycleConfig + * @classdesc Represents an IdentityConfig. + * @implements IIdentityConfig * @constructor - * @param {google.cloud.dataproc.v1.ILifecycleConfig=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IIdentityConfig=} [properties] Properties to set */ - function LifecycleConfig(properties) { + function IdentityConfig(properties) { + this.userServiceAccountMapping = {}; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -17636,131 +17661,95 @@ } /** - * LifecycleConfig idleDeleteTtl. - * @member {google.protobuf.IDuration|null|undefined} idleDeleteTtl - * @memberof google.cloud.dataproc.v1.LifecycleConfig - * @instance - */ - LifecycleConfig.prototype.idleDeleteTtl = null; - - /** - * LifecycleConfig autoDeleteTime. - * @member {google.protobuf.ITimestamp|null|undefined} autoDeleteTime - * @memberof google.cloud.dataproc.v1.LifecycleConfig - * @instance - */ - LifecycleConfig.prototype.autoDeleteTime = null; - - /** - * LifecycleConfig autoDeleteTtl. - * @member {google.protobuf.IDuration|null|undefined} autoDeleteTtl - * @memberof google.cloud.dataproc.v1.LifecycleConfig - * @instance - */ - LifecycleConfig.prototype.autoDeleteTtl = null; - - /** - * LifecycleConfig idleStartTime. - * @member {google.protobuf.ITimestamp|null|undefined} idleStartTime - * @memberof google.cloud.dataproc.v1.LifecycleConfig - * @instance - */ - LifecycleConfig.prototype.idleStartTime = null; - - // OneOf field names bound to virtual getters and setters - var $oneOfFields; - - /** - * LifecycleConfig ttl. - * @member {"autoDeleteTime"|"autoDeleteTtl"|undefined} ttl - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * IdentityConfig userServiceAccountMapping. 
+ * @member {Object.} userServiceAccountMapping + * @memberof google.cloud.dataproc.v1.IdentityConfig * @instance */ - Object.defineProperty(LifecycleConfig.prototype, "ttl", { - get: $util.oneOfGetter($oneOfFields = ["autoDeleteTime", "autoDeleteTtl"]), - set: $util.oneOfSetter($oneOfFields) - }); + IdentityConfig.prototype.userServiceAccountMapping = $util.emptyObject; /** - * Creates a new LifecycleConfig instance using the specified properties. + * Creates a new IdentityConfig instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @memberof google.cloud.dataproc.v1.IdentityConfig * @static - * @param {google.cloud.dataproc.v1.ILifecycleConfig=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.LifecycleConfig} LifecycleConfig instance + * @param {google.cloud.dataproc.v1.IIdentityConfig=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.IdentityConfig} IdentityConfig instance */ - LifecycleConfig.create = function create(properties) { - return new LifecycleConfig(properties); + IdentityConfig.create = function create(properties) { + return new IdentityConfig(properties); }; /** - * Encodes the specified LifecycleConfig message. Does not implicitly {@link google.cloud.dataproc.v1.LifecycleConfig.verify|verify} messages. + * Encodes the specified IdentityConfig message. Does not implicitly {@link google.cloud.dataproc.v1.IdentityConfig.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @memberof google.cloud.dataproc.v1.IdentityConfig * @static - * @param {google.cloud.dataproc.v1.ILifecycleConfig} message LifecycleConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.IIdentityConfig} message IdentityConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LifecycleConfig.encode = function encode(message, writer) { + IdentityConfig.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.idleDeleteTtl != null && Object.hasOwnProperty.call(message, "idleDeleteTtl")) - $root.google.protobuf.Duration.encode(message.idleDeleteTtl, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.autoDeleteTime != null && Object.hasOwnProperty.call(message, "autoDeleteTime")) - $root.google.protobuf.Timestamp.encode(message.autoDeleteTime, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.autoDeleteTtl != null && Object.hasOwnProperty.call(message, "autoDeleteTtl")) - $root.google.protobuf.Duration.encode(message.autoDeleteTtl, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.idleStartTime != null && Object.hasOwnProperty.call(message, "idleStartTime")) - $root.google.protobuf.Timestamp.encode(message.idleStartTime, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.userServiceAccountMapping != null && Object.hasOwnProperty.call(message, "userServiceAccountMapping")) + for (var keys = Object.keys(message.userServiceAccountMapping), i = 0; i < keys.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.userServiceAccountMapping[keys[i]]).ldelim(); return writer; }; /** - * Encodes the specified LifecycleConfig message, length delimited. 
Does not implicitly {@link google.cloud.dataproc.v1.LifecycleConfig.verify|verify} messages. + * Encodes the specified IdentityConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.IdentityConfig.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @memberof google.cloud.dataproc.v1.IdentityConfig * @static - * @param {google.cloud.dataproc.v1.ILifecycleConfig} message LifecycleConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.IIdentityConfig} message IdentityConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LifecycleConfig.encodeDelimited = function encodeDelimited(message, writer) { + IdentityConfig.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a LifecycleConfig message from the specified reader or buffer. + * Decodes an IdentityConfig message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @memberof google.cloud.dataproc.v1.IdentityConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.LifecycleConfig} LifecycleConfig + * @returns {google.cloud.dataproc.v1.IdentityConfig} IdentityConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LifecycleConfig.decode = function decode(reader, length) { + IdentityConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.LifecycleConfig(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.IdentityConfig(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.idleDeleteTtl = $root.google.protobuf.Duration.decode(reader, reader.uint32()); - break; - } - case 2: { - message.autoDeleteTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); - break; - } - case 3: { - message.autoDeleteTtl = $root.google.protobuf.Duration.decode(reader, reader.uint32()); - break; - } - case 4: { - message.idleStartTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + if (message.userServiceAccountMapping === $util.emptyObject) + message.userServiceAccountMapping = {}; + var end2 = reader.uint32() + reader.pos; + key = ""; + value = ""; + while (reader.pos < end2) { + var tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.string(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.userServiceAccountMapping[key] = value; break; } default: @@ -17772,180 +17761,140 @@ }; /** - * Decodes a LifecycleConfig message from the specified reader or buffer, length delimited. + * Decodes an IdentityConfig message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @memberof google.cloud.dataproc.v1.IdentityConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.LifecycleConfig} LifecycleConfig + * @returns {google.cloud.dataproc.v1.IdentityConfig} IdentityConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LifecycleConfig.decodeDelimited = function decodeDelimited(reader) { + IdentityConfig.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a LifecycleConfig message. + * Verifies an IdentityConfig message. * @function verify - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @memberof google.cloud.dataproc.v1.IdentityConfig * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - LifecycleConfig.verify = function verify(message) { + IdentityConfig.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - var properties = {}; - if (message.idleDeleteTtl != null && message.hasOwnProperty("idleDeleteTtl")) { - var error = $root.google.protobuf.Duration.verify(message.idleDeleteTtl); - if (error) - return "idleDeleteTtl." + error; - } - if (message.autoDeleteTime != null && message.hasOwnProperty("autoDeleteTime")) { - properties.ttl = 1; - { - var error = $root.google.protobuf.Timestamp.verify(message.autoDeleteTime); - if (error) - return "autoDeleteTime." 
+ error; - } - } - if (message.autoDeleteTtl != null && message.hasOwnProperty("autoDeleteTtl")) { - if (properties.ttl === 1) - return "ttl: multiple values"; - properties.ttl = 1; - { - var error = $root.google.protobuf.Duration.verify(message.autoDeleteTtl); - if (error) - return "autoDeleteTtl." + error; - } - } - if (message.idleStartTime != null && message.hasOwnProperty("idleStartTime")) { - var error = $root.google.protobuf.Timestamp.verify(message.idleStartTime); - if (error) - return "idleStartTime." + error; + if (message.userServiceAccountMapping != null && message.hasOwnProperty("userServiceAccountMapping")) { + if (!$util.isObject(message.userServiceAccountMapping)) + return "userServiceAccountMapping: object expected"; + var key = Object.keys(message.userServiceAccountMapping); + for (var i = 0; i < key.length; ++i) + if (!$util.isString(message.userServiceAccountMapping[key[i]])) + return "userServiceAccountMapping: string{k:string} expected"; } return null; }; /** - * Creates a LifecycleConfig message from a plain object. Also converts values to their respective internal types. + * Creates an IdentityConfig message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @memberof google.cloud.dataproc.v1.IdentityConfig * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.LifecycleConfig} LifecycleConfig + * @returns {google.cloud.dataproc.v1.IdentityConfig} IdentityConfig */ - LifecycleConfig.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.LifecycleConfig) + IdentityConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.IdentityConfig) return object; - var message = new $root.google.cloud.dataproc.v1.LifecycleConfig(); - if (object.idleDeleteTtl != null) { - if (typeof object.idleDeleteTtl !== "object") - throw TypeError(".google.cloud.dataproc.v1.LifecycleConfig.idleDeleteTtl: object expected"); - message.idleDeleteTtl = $root.google.protobuf.Duration.fromObject(object.idleDeleteTtl); - } - if (object.autoDeleteTime != null) { - if (typeof object.autoDeleteTime !== "object") - throw TypeError(".google.cloud.dataproc.v1.LifecycleConfig.autoDeleteTime: object expected"); - message.autoDeleteTime = $root.google.protobuf.Timestamp.fromObject(object.autoDeleteTime); - } - if (object.autoDeleteTtl != null) { - if (typeof object.autoDeleteTtl !== "object") - throw TypeError(".google.cloud.dataproc.v1.LifecycleConfig.autoDeleteTtl: object expected"); - message.autoDeleteTtl = $root.google.protobuf.Duration.fromObject(object.autoDeleteTtl); - } - if (object.idleStartTime != null) { - if (typeof object.idleStartTime !== "object") - throw TypeError(".google.cloud.dataproc.v1.LifecycleConfig.idleStartTime: object expected"); - message.idleStartTime = $root.google.protobuf.Timestamp.fromObject(object.idleStartTime); + var message = new $root.google.cloud.dataproc.v1.IdentityConfig(); + if (object.userServiceAccountMapping) { + if (typeof object.userServiceAccountMapping !== "object") + throw 
TypeError(".google.cloud.dataproc.v1.IdentityConfig.userServiceAccountMapping: object expected"); + message.userServiceAccountMapping = {}; + for (var keys = Object.keys(object.userServiceAccountMapping), i = 0; i < keys.length; ++i) + message.userServiceAccountMapping[keys[i]] = String(object.userServiceAccountMapping[keys[i]]); } return message; }; /** - * Creates a plain object from a LifecycleConfig message. Also converts values to other types if specified. + * Creates a plain object from an IdentityConfig message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @memberof google.cloud.dataproc.v1.IdentityConfig * @static - * @param {google.cloud.dataproc.v1.LifecycleConfig} message LifecycleConfig + * @param {google.cloud.dataproc.v1.IdentityConfig} message IdentityConfig * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - LifecycleConfig.toObject = function toObject(message, options) { + IdentityConfig.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.defaults) { - object.idleDeleteTtl = null; - object.idleStartTime = null; - } - if (message.idleDeleteTtl != null && message.hasOwnProperty("idleDeleteTtl")) - object.idleDeleteTtl = $root.google.protobuf.Duration.toObject(message.idleDeleteTtl, options); - if (message.autoDeleteTime != null && message.hasOwnProperty("autoDeleteTime")) { - object.autoDeleteTime = $root.google.protobuf.Timestamp.toObject(message.autoDeleteTime, options); - if (options.oneofs) - object.ttl = "autoDeleteTime"; - } - if (message.autoDeleteTtl != null && message.hasOwnProperty("autoDeleteTtl")) { - object.autoDeleteTtl = $root.google.protobuf.Duration.toObject(message.autoDeleteTtl, options); - if (options.oneofs) - object.ttl = "autoDeleteTtl"; + if (options.objects || options.defaults) + object.userServiceAccountMapping = {}; + var keys2; + 
if (message.userServiceAccountMapping && (keys2 = Object.keys(message.userServiceAccountMapping)).length) { + object.userServiceAccountMapping = {}; + for (var j = 0; j < keys2.length; ++j) + object.userServiceAccountMapping[keys2[j]] = message.userServiceAccountMapping[keys2[j]]; } - if (message.idleStartTime != null && message.hasOwnProperty("idleStartTime")) - object.idleStartTime = $root.google.protobuf.Timestamp.toObject(message.idleStartTime, options); return object; }; /** - * Converts this LifecycleConfig to JSON. + * Converts this IdentityConfig to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @memberof google.cloud.dataproc.v1.IdentityConfig * @instance * @returns {Object.} JSON object */ - LifecycleConfig.prototype.toJSON = function toJSON() { + IdentityConfig.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for LifecycleConfig + * Gets the default type url for IdentityConfig * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @memberof google.cloud.dataproc.v1.IdentityConfig * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - LifecycleConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + IdentityConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.LifecycleConfig"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.IdentityConfig"; }; - return LifecycleConfig; + return IdentityConfig; })(); - v1.MetastoreConfig = (function() { + v1.SoftwareConfig = (function() { /** - * Properties of a MetastoreConfig. + * Properties of a SoftwareConfig. 
* @memberof google.cloud.dataproc.v1 - * @interface IMetastoreConfig - * @property {string|null} [dataprocMetastoreService] MetastoreConfig dataprocMetastoreService + * @interface ISoftwareConfig + * @property {string|null} [imageVersion] SoftwareConfig imageVersion + * @property {Object.|null} [properties] SoftwareConfig properties + * @property {Array.|null} [optionalComponents] SoftwareConfig optionalComponents */ /** - * Constructs a new MetastoreConfig. + * Constructs a new SoftwareConfig. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a MetastoreConfig. - * @implements IMetastoreConfig + * @classdesc Represents a SoftwareConfig. + * @implements ISoftwareConfig * @constructor - * @param {google.cloud.dataproc.v1.IMetastoreConfig=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.ISoftwareConfig=} [properties] Properties to set */ - function MetastoreConfig(properties) { + function SoftwareConfig(properties) { + this.properties = {}; + this.optionalComponents = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -17953,203 +17902,382 @@ } /** - * MetastoreConfig dataprocMetastoreService. - * @member {string} dataprocMetastoreService - * @memberof google.cloud.dataproc.v1.MetastoreConfig + * SoftwareConfig imageVersion. + * @member {string} imageVersion + * @memberof google.cloud.dataproc.v1.SoftwareConfig * @instance */ - MetastoreConfig.prototype.dataprocMetastoreService = ""; + SoftwareConfig.prototype.imageVersion = ""; /** - * Creates a new MetastoreConfig instance using the specified properties. + * SoftwareConfig properties. + * @member {Object.} properties + * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @instance + */ + SoftwareConfig.prototype.properties = $util.emptyObject; + + /** + * SoftwareConfig optionalComponents. 
+ * @member {Array.} optionalComponents + * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @instance + */ + SoftwareConfig.prototype.optionalComponents = $util.emptyArray; + + /** + * Creates a new SoftwareConfig instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @memberof google.cloud.dataproc.v1.SoftwareConfig * @static - * @param {google.cloud.dataproc.v1.IMetastoreConfig=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.MetastoreConfig} MetastoreConfig instance + * @param {google.cloud.dataproc.v1.ISoftwareConfig=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.SoftwareConfig} SoftwareConfig instance */ - MetastoreConfig.create = function create(properties) { - return new MetastoreConfig(properties); + SoftwareConfig.create = function create(properties) { + return new SoftwareConfig(properties); }; /** - * Encodes the specified MetastoreConfig message. Does not implicitly {@link google.cloud.dataproc.v1.MetastoreConfig.verify|verify} messages. + * Encodes the specified SoftwareConfig message. Does not implicitly {@link google.cloud.dataproc.v1.SoftwareConfig.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @memberof google.cloud.dataproc.v1.SoftwareConfig * @static - * @param {google.cloud.dataproc.v1.IMetastoreConfig} message MetastoreConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.ISoftwareConfig} message SoftwareConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MetastoreConfig.encode = function encode(message, writer) { + SoftwareConfig.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.dataprocMetastoreService != null && Object.hasOwnProperty.call(message, "dataprocMetastoreService")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.dataprocMetastoreService); + if (message.imageVersion != null && Object.hasOwnProperty.call(message, "imageVersion")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.imageVersion); + if (message.properties != null && Object.hasOwnProperty.call(message, "properties")) + for (var keys = Object.keys(message.properties), i = 0; i < keys.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.properties[keys[i]]).ldelim(); + if (message.optionalComponents != null && message.optionalComponents.length) { + writer.uint32(/* id 3, wireType 2 =*/26).fork(); + for (var i = 0; i < message.optionalComponents.length; ++i) + writer.int32(message.optionalComponents[i]); + writer.ldelim(); + } return writer; }; /** - * Encodes the specified MetastoreConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.MetastoreConfig.verify|verify} messages. + * Encodes the specified SoftwareConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.SoftwareConfig.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @memberof google.cloud.dataproc.v1.SoftwareConfig * @static - * @param {google.cloud.dataproc.v1.IMetastoreConfig} message MetastoreConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.ISoftwareConfig} message SoftwareConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MetastoreConfig.encodeDelimited = function encodeDelimited(message, writer) { + SoftwareConfig.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a MetastoreConfig message from the specified reader or buffer. + * Decodes a SoftwareConfig message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @memberof google.cloud.dataproc.v1.SoftwareConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.MetastoreConfig} MetastoreConfig + * @returns {google.cloud.dataproc.v1.SoftwareConfig} SoftwareConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MetastoreConfig.decode = function decode(reader, length) { + SoftwareConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.MetastoreConfig(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.SoftwareConfig(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.dataprocMetastoreService = reader.string(); + message.imageVersion = reader.string(); break; } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a MetastoreConfig message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.MetastoreConfig - * @static + case 2: { + if (message.properties === $util.emptyObject) + message.properties = {}; + var end2 = reader.uint32() + reader.pos; + key = ""; + value = ""; + while (reader.pos < end2) { + var tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.string(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.properties[key] = value; + break; + } + case 3: { + if (!(message.optionalComponents && message.optionalComponents.length)) + message.optionalComponents = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.optionalComponents.push(reader.int32()); + } else + message.optionalComponents.push(reader.int32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a SoftwareConfig message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof google.cloud.dataproc.v1.SoftwareConfig + * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.MetastoreConfig} MetastoreConfig + * @returns {google.cloud.dataproc.v1.SoftwareConfig} SoftwareConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MetastoreConfig.decodeDelimited = function decodeDelimited(reader) { + SoftwareConfig.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a MetastoreConfig message. + * Verifies a SoftwareConfig message. * @function verify - * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @memberof google.cloud.dataproc.v1.SoftwareConfig * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - MetastoreConfig.verify = function verify(message) { + SoftwareConfig.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.dataprocMetastoreService != null && message.hasOwnProperty("dataprocMetastoreService")) - if (!$util.isString(message.dataprocMetastoreService)) - return "dataprocMetastoreService: string expected"; + if (message.imageVersion != null && message.hasOwnProperty("imageVersion")) + if (!$util.isString(message.imageVersion)) + return "imageVersion: string expected"; + if (message.properties != null && message.hasOwnProperty("properties")) { + if (!$util.isObject(message.properties)) + return "properties: object expected"; + var key = Object.keys(message.properties); + for (var i = 0; i < key.length; ++i) + if (!$util.isString(message.properties[key[i]])) + return "properties: string{k:string} expected"; + } + if 
(message.optionalComponents != null && message.hasOwnProperty("optionalComponents")) { + if (!Array.isArray(message.optionalComponents)) + return "optionalComponents: array expected"; + for (var i = 0; i < message.optionalComponents.length; ++i) + switch (message.optionalComponents[i]) { + default: + return "optionalComponents: enum value[] expected"; + case 0: + case 5: + case 13: + case 9: + case 14: + case 11: + case 3: + case 1: + case 6: + case 12: + case 10: + case 4: + case 8: + break; + } + } return null; }; /** - * Creates a MetastoreConfig message from a plain object. Also converts values to their respective internal types. + * Creates a SoftwareConfig message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @memberof google.cloud.dataproc.v1.SoftwareConfig * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.MetastoreConfig} MetastoreConfig + * @returns {google.cloud.dataproc.v1.SoftwareConfig} SoftwareConfig */ - MetastoreConfig.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.MetastoreConfig) + SoftwareConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.SoftwareConfig) return object; - var message = new $root.google.cloud.dataproc.v1.MetastoreConfig(); - if (object.dataprocMetastoreService != null) - message.dataprocMetastoreService = String(object.dataprocMetastoreService); + var message = new $root.google.cloud.dataproc.v1.SoftwareConfig(); + if (object.imageVersion != null) + message.imageVersion = String(object.imageVersion); + if (object.properties) { + if (typeof object.properties !== "object") + throw TypeError(".google.cloud.dataproc.v1.SoftwareConfig.properties: object expected"); + message.properties = {}; + for (var keys = Object.keys(object.properties), i = 0; i < keys.length; ++i) + 
message.properties[keys[i]] = String(object.properties[keys[i]]); + } + if (object.optionalComponents) { + if (!Array.isArray(object.optionalComponents)) + throw TypeError(".google.cloud.dataproc.v1.SoftwareConfig.optionalComponents: array expected"); + message.optionalComponents = []; + for (var i = 0; i < object.optionalComponents.length; ++i) + switch (object.optionalComponents[i]) { + default: + if (typeof object.optionalComponents[i] === "number") { + message.optionalComponents[i] = object.optionalComponents[i]; + break; + } + case "COMPONENT_UNSPECIFIED": + case 0: + message.optionalComponents[i] = 0; + break; + case "ANACONDA": + case 5: + message.optionalComponents[i] = 5; + break; + case "DOCKER": + case 13: + message.optionalComponents[i] = 13; + break; + case "DRUID": + case 9: + message.optionalComponents[i] = 9; + break; + case "FLINK": + case 14: + message.optionalComponents[i] = 14; + break; + case "HBASE": + case 11: + message.optionalComponents[i] = 11; + break; + case "HIVE_WEBHCAT": + case 3: + message.optionalComponents[i] = 3; + break; + case "JUPYTER": + case 1: + message.optionalComponents[i] = 1; + break; + case "PRESTO": + case 6: + message.optionalComponents[i] = 6; + break; + case "RANGER": + case 12: + message.optionalComponents[i] = 12; + break; + case "SOLR": + case 10: + message.optionalComponents[i] = 10; + break; + case "ZEPPELIN": + case 4: + message.optionalComponents[i] = 4; + break; + case "ZOOKEEPER": + case 8: + message.optionalComponents[i] = 8; + break; + } + } return message; }; /** - * Creates a plain object from a MetastoreConfig message. Also converts values to other types if specified. + * Creates a plain object from a SoftwareConfig message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @memberof google.cloud.dataproc.v1.SoftwareConfig * @static - * @param {google.cloud.dataproc.v1.MetastoreConfig} message MetastoreConfig + * @param {google.cloud.dataproc.v1.SoftwareConfig} message SoftwareConfig * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - MetastoreConfig.toObject = function toObject(message, options) { + SoftwareConfig.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; + if (options.arrays || options.defaults) + object.optionalComponents = []; + if (options.objects || options.defaults) + object.properties = {}; if (options.defaults) - object.dataprocMetastoreService = ""; - if (message.dataprocMetastoreService != null && message.hasOwnProperty("dataprocMetastoreService")) - object.dataprocMetastoreService = message.dataprocMetastoreService; + object.imageVersion = ""; + if (message.imageVersion != null && message.hasOwnProperty("imageVersion")) + object.imageVersion = message.imageVersion; + var keys2; + if (message.properties && (keys2 = Object.keys(message.properties)).length) { + object.properties = {}; + for (var j = 0; j < keys2.length; ++j) + object.properties[keys2[j]] = message.properties[keys2[j]]; + } + if (message.optionalComponents && message.optionalComponents.length) { + object.optionalComponents = []; + for (var j = 0; j < message.optionalComponents.length; ++j) + object.optionalComponents[j] = options.enums === String ? $root.google.cloud.dataproc.v1.Component[message.optionalComponents[j]] === undefined ? message.optionalComponents[j] : $root.google.cloud.dataproc.v1.Component[message.optionalComponents[j]] : message.optionalComponents[j]; + } return object; }; /** - * Converts this MetastoreConfig to JSON. + * Converts this SoftwareConfig to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @memberof google.cloud.dataproc.v1.SoftwareConfig * @instance * @returns {Object.} JSON object */ - MetastoreConfig.prototype.toJSON = function toJSON() { + SoftwareConfig.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for MetastoreConfig + * Gets the default type url for SoftwareConfig * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @memberof google.cloud.dataproc.v1.SoftwareConfig * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - MetastoreConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SoftwareConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.MetastoreConfig"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.SoftwareConfig"; }; - return MetastoreConfig; + return SoftwareConfig; })(); - v1.DataprocMetricConfig = (function() { + v1.LifecycleConfig = (function() { /** - * Properties of a DataprocMetricConfig. + * Properties of a LifecycleConfig. * @memberof google.cloud.dataproc.v1 - * @interface IDataprocMetricConfig - * @property {Array.|null} [metrics] DataprocMetricConfig metrics + * @interface ILifecycleConfig + * @property {google.protobuf.IDuration|null} [idleDeleteTtl] LifecycleConfig idleDeleteTtl + * @property {google.protobuf.ITimestamp|null} [autoDeleteTime] LifecycleConfig autoDeleteTime + * @property {google.protobuf.IDuration|null} [autoDeleteTtl] LifecycleConfig autoDeleteTtl + * @property {google.protobuf.ITimestamp|null} [idleStartTime] LifecycleConfig idleStartTime */ /** - * Constructs a new DataprocMetricConfig. + * Constructs a new LifecycleConfig. 
* @memberof google.cloud.dataproc.v1 - * @classdesc Represents a DataprocMetricConfig. - * @implements IDataprocMetricConfig + * @classdesc Represents a LifecycleConfig. + * @implements ILifecycleConfig * @constructor - * @param {google.cloud.dataproc.v1.IDataprocMetricConfig=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.ILifecycleConfig=} [properties] Properties to set */ - function DataprocMetricConfig(properties) { - this.metrics = []; + function LifecycleConfig(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -18157,78 +18285,131 @@ } /** - * DataprocMetricConfig metrics. - * @member {Array.} metrics - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * LifecycleConfig idleDeleteTtl. + * @member {google.protobuf.IDuration|null|undefined} idleDeleteTtl + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @instance */ - DataprocMetricConfig.prototype.metrics = $util.emptyArray; + LifecycleConfig.prototype.idleDeleteTtl = null; /** - * Creates a new DataprocMetricConfig instance using the specified properties. + * LifecycleConfig autoDeleteTime. + * @member {google.protobuf.ITimestamp|null|undefined} autoDeleteTime + * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @instance + */ + LifecycleConfig.prototype.autoDeleteTime = null; + + /** + * LifecycleConfig autoDeleteTtl. + * @member {google.protobuf.IDuration|null|undefined} autoDeleteTtl + * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @instance + */ + LifecycleConfig.prototype.autoDeleteTtl = null; + + /** + * LifecycleConfig idleStartTime. + * @member {google.protobuf.ITimestamp|null|undefined} idleStartTime + * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @instance + */ + LifecycleConfig.prototype.idleStartTime = null; + + // OneOf field names bound to virtual getters and setters + var $oneOfFields; + + /** + * LifecycleConfig ttl. 
+ * @member {"autoDeleteTime"|"autoDeleteTtl"|undefined} ttl + * @memberof google.cloud.dataproc.v1.LifecycleConfig + * @instance + */ + Object.defineProperty(LifecycleConfig.prototype, "ttl", { + get: $util.oneOfGetter($oneOfFields = ["autoDeleteTime", "autoDeleteTtl"]), + set: $util.oneOfSetter($oneOfFields) + }); + + /** + * Creates a new LifecycleConfig instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @static - * @param {google.cloud.dataproc.v1.IDataprocMetricConfig=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.DataprocMetricConfig} DataprocMetricConfig instance + * @param {google.cloud.dataproc.v1.ILifecycleConfig=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.LifecycleConfig} LifecycleConfig instance */ - DataprocMetricConfig.create = function create(properties) { - return new DataprocMetricConfig(properties); + LifecycleConfig.create = function create(properties) { + return new LifecycleConfig(properties); }; /** - * Encodes the specified DataprocMetricConfig message. Does not implicitly {@link google.cloud.dataproc.v1.DataprocMetricConfig.verify|verify} messages. + * Encodes the specified LifecycleConfig message. Does not implicitly {@link google.cloud.dataproc.v1.LifecycleConfig.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @static - * @param {google.cloud.dataproc.v1.IDataprocMetricConfig} message DataprocMetricConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.ILifecycleConfig} message LifecycleConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DataprocMetricConfig.encode = function encode(message, writer) { + LifecycleConfig.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.metrics != null && message.metrics.length) - for (var i = 0; i < message.metrics.length; ++i) - $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric.encode(message.metrics[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.idleDeleteTtl != null && Object.hasOwnProperty.call(message, "idleDeleteTtl")) + $root.google.protobuf.Duration.encode(message.idleDeleteTtl, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.autoDeleteTime != null && Object.hasOwnProperty.call(message, "autoDeleteTime")) + $root.google.protobuf.Timestamp.encode(message.autoDeleteTime, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.autoDeleteTtl != null && Object.hasOwnProperty.call(message, "autoDeleteTtl")) + $root.google.protobuf.Duration.encode(message.autoDeleteTtl, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.idleStartTime != null && Object.hasOwnProperty.call(message, "idleStartTime")) + $root.google.protobuf.Timestamp.encode(message.idleStartTime, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified DataprocMetricConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DataprocMetricConfig.verify|verify} messages. 
+ * Encodes the specified LifecycleConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.LifecycleConfig.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @static - * @param {google.cloud.dataproc.v1.IDataprocMetricConfig} message DataprocMetricConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.ILifecycleConfig} message LifecycleConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DataprocMetricConfig.encodeDelimited = function encodeDelimited(message, writer) { + LifecycleConfig.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DataprocMetricConfig message from the specified reader or buffer. + * Decodes a LifecycleConfig message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.DataprocMetricConfig} DataprocMetricConfig + * @returns {google.cloud.dataproc.v1.LifecycleConfig} LifecycleConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DataprocMetricConfig.decode = function decode(reader, length) { + LifecycleConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DataprocMetricConfig(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.LifecycleConfig(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.metrics && message.metrics.length)) - message.metrics = []; - message.metrics.push($root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric.decode(reader, reader.uint32())); + message.idleDeleteTtl = $root.google.protobuf.Duration.decode(reader, reader.uint32()); + break; + } + case 2: { + message.autoDeleteTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + break; + } + case 3: { + message.autoDeleteTtl = $root.google.protobuf.Duration.decode(reader, reader.uint32()); + break; + } + case 4: { + message.idleStartTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); break; } default: @@ -18240,453 +18421,384 @@ }; /** - * Decodes a DataprocMetricConfig message from the specified reader or buffer, length delimited. + * Decodes a LifecycleConfig message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.DataprocMetricConfig} DataprocMetricConfig + * @returns {google.cloud.dataproc.v1.LifecycleConfig} LifecycleConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DataprocMetricConfig.decodeDelimited = function decodeDelimited(reader) { + LifecycleConfig.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DataprocMetricConfig message. + * Verifies a LifecycleConfig message. 
* @function verify - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DataprocMetricConfig.verify = function verify(message) { + LifecycleConfig.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.metrics != null && message.hasOwnProperty("metrics")) { - if (!Array.isArray(message.metrics)) - return "metrics: array expected"; - for (var i = 0; i < message.metrics.length; ++i) { - var error = $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric.verify(message.metrics[i]); + var properties = {}; + if (message.idleDeleteTtl != null && message.hasOwnProperty("idleDeleteTtl")) { + var error = $root.google.protobuf.Duration.verify(message.idleDeleteTtl); + if (error) + return "idleDeleteTtl." + error; + } + if (message.autoDeleteTime != null && message.hasOwnProperty("autoDeleteTime")) { + properties.ttl = 1; + { + var error = $root.google.protobuf.Timestamp.verify(message.autoDeleteTime); if (error) - return "metrics." + error; + return "autoDeleteTime." + error; + } + } + if (message.autoDeleteTtl != null && message.hasOwnProperty("autoDeleteTtl")) { + if (properties.ttl === 1) + return "ttl: multiple values"; + properties.ttl = 1; + { + var error = $root.google.protobuf.Duration.verify(message.autoDeleteTtl); + if (error) + return "autoDeleteTtl." + error; } } + if (message.idleStartTime != null && message.hasOwnProperty("idleStartTime")) { + var error = $root.google.protobuf.Timestamp.verify(message.idleStartTime); + if (error) + return "idleStartTime." + error; + } return null; }; /** - * Creates a DataprocMetricConfig message from a plain object. Also converts values to their respective internal types. + * Creates a LifecycleConfig message from a plain object. 
Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.DataprocMetricConfig} DataprocMetricConfig + * @returns {google.cloud.dataproc.v1.LifecycleConfig} LifecycleConfig */ - DataprocMetricConfig.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.DataprocMetricConfig) + LifecycleConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.LifecycleConfig) return object; - var message = new $root.google.cloud.dataproc.v1.DataprocMetricConfig(); - if (object.metrics) { - if (!Array.isArray(object.metrics)) - throw TypeError(".google.cloud.dataproc.v1.DataprocMetricConfig.metrics: array expected"); - message.metrics = []; - for (var i = 0; i < object.metrics.length; ++i) { - if (typeof object.metrics[i] !== "object") - throw TypeError(".google.cloud.dataproc.v1.DataprocMetricConfig.metrics: object expected"); - message.metrics[i] = $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric.fromObject(object.metrics[i]); - } + var message = new $root.google.cloud.dataproc.v1.LifecycleConfig(); + if (object.idleDeleteTtl != null) { + if (typeof object.idleDeleteTtl !== "object") + throw TypeError(".google.cloud.dataproc.v1.LifecycleConfig.idleDeleteTtl: object expected"); + message.idleDeleteTtl = $root.google.protobuf.Duration.fromObject(object.idleDeleteTtl); + } + if (object.autoDeleteTime != null) { + if (typeof object.autoDeleteTime !== "object") + throw TypeError(".google.cloud.dataproc.v1.LifecycleConfig.autoDeleteTime: object expected"); + message.autoDeleteTime = $root.google.protobuf.Timestamp.fromObject(object.autoDeleteTime); + } + if (object.autoDeleteTtl != null) { + if (typeof object.autoDeleteTtl !== "object") + throw 
TypeError(".google.cloud.dataproc.v1.LifecycleConfig.autoDeleteTtl: object expected"); + message.autoDeleteTtl = $root.google.protobuf.Duration.fromObject(object.autoDeleteTtl); + } + if (object.idleStartTime != null) { + if (typeof object.idleStartTime !== "object") + throw TypeError(".google.cloud.dataproc.v1.LifecycleConfig.idleStartTime: object expected"); + message.idleStartTime = $root.google.protobuf.Timestamp.fromObject(object.idleStartTime); } return message; }; /** - * Creates a plain object from a DataprocMetricConfig message. Also converts values to other types if specified. + * Creates a plain object from a LifecycleConfig message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @static - * @param {google.cloud.dataproc.v1.DataprocMetricConfig} message DataprocMetricConfig + * @param {google.cloud.dataproc.v1.LifecycleConfig} message LifecycleConfig * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DataprocMetricConfig.toObject = function toObject(message, options) { + LifecycleConfig.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) - object.metrics = []; - if (message.metrics && message.metrics.length) { - object.metrics = []; - for (var j = 0; j < message.metrics.length; ++j) - object.metrics[j] = $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric.toObject(message.metrics[j], options); + if (options.defaults) { + object.idleDeleteTtl = null; + object.idleStartTime = null; + } + if (message.idleDeleteTtl != null && message.hasOwnProperty("idleDeleteTtl")) + object.idleDeleteTtl = $root.google.protobuf.Duration.toObject(message.idleDeleteTtl, options); + if (message.autoDeleteTime != null && message.hasOwnProperty("autoDeleteTime")) { + object.autoDeleteTime = 
$root.google.protobuf.Timestamp.toObject(message.autoDeleteTime, options); + if (options.oneofs) + object.ttl = "autoDeleteTime"; + } + if (message.autoDeleteTtl != null && message.hasOwnProperty("autoDeleteTtl")) { + object.autoDeleteTtl = $root.google.protobuf.Duration.toObject(message.autoDeleteTtl, options); + if (options.oneofs) + object.ttl = "autoDeleteTtl"; } + if (message.idleStartTime != null && message.hasOwnProperty("idleStartTime")) + object.idleStartTime = $root.google.protobuf.Timestamp.toObject(message.idleStartTime, options); return object; }; /** - * Converts this DataprocMetricConfig to JSON. + * Converts this LifecycleConfig to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @instance * @returns {Object.} JSON object */ - DataprocMetricConfig.prototype.toJSON = function toJSON() { + LifecycleConfig.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DataprocMetricConfig + * Gets the default type url for LifecycleConfig * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @memberof google.cloud.dataproc.v1.LifecycleConfig * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DataprocMetricConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LifecycleConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.DataprocMetricConfig"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.LifecycleConfig"; }; + return LifecycleConfig; + })(); + + v1.MetastoreConfig = (function() { + /** - * MetricSource enum. 
- * @name google.cloud.dataproc.v1.DataprocMetricConfig.MetricSource - * @enum {number} - * @property {number} METRIC_SOURCE_UNSPECIFIED=0 METRIC_SOURCE_UNSPECIFIED value - * @property {number} MONITORING_AGENT_DEFAULTS=1 MONITORING_AGENT_DEFAULTS value - * @property {number} HDFS=2 HDFS value - * @property {number} SPARK=3 SPARK value - * @property {number} YARN=4 YARN value - * @property {number} SPARK_HISTORY_SERVER=5 SPARK_HISTORY_SERVER value - * @property {number} HIVESERVER2=6 HIVESERVER2 value + * Properties of a MetastoreConfig. + * @memberof google.cloud.dataproc.v1 + * @interface IMetastoreConfig + * @property {string|null} [dataprocMetastoreService] MetastoreConfig dataprocMetastoreService */ - DataprocMetricConfig.MetricSource = (function() { - var valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "METRIC_SOURCE_UNSPECIFIED"] = 0; - values[valuesById[1] = "MONITORING_AGENT_DEFAULTS"] = 1; - values[valuesById[2] = "HDFS"] = 2; - values[valuesById[3] = "SPARK"] = 3; - values[valuesById[4] = "YARN"] = 4; - values[valuesById[5] = "SPARK_HISTORY_SERVER"] = 5; - values[valuesById[6] = "HIVESERVER2"] = 6; - return values; - })(); - - DataprocMetricConfig.Metric = (function() { - - /** - * Properties of a Metric. - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig - * @interface IMetric - * @property {google.cloud.dataproc.v1.DataprocMetricConfig.MetricSource|null} [metricSource] Metric metricSource - * @property {Array.|null} [metricOverrides] Metric metricOverrides - */ - - /** - * Constructs a new Metric. - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig - * @classdesc Represents a Metric. 
- * @implements IMetric - * @constructor - * @param {google.cloud.dataproc.v1.DataprocMetricConfig.IMetric=} [properties] Properties to set - */ - function Metric(properties) { - this.metricOverrides = []; - if (properties) - for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - /** - * Metric metricSource. - * @member {google.cloud.dataproc.v1.DataprocMetricConfig.MetricSource} metricSource - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @instance - */ - Metric.prototype.metricSource = 0; + /** + * Constructs a new MetastoreConfig. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a MetastoreConfig. + * @implements IMetastoreConfig + * @constructor + * @param {google.cloud.dataproc.v1.IMetastoreConfig=} [properties] Properties to set + */ + function MetastoreConfig(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Metric metricOverrides. - * @member {Array.} metricOverrides - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @instance - */ - Metric.prototype.metricOverrides = $util.emptyArray; + /** + * MetastoreConfig dataprocMetastoreService. + * @member {string} dataprocMetastoreService + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @instance + */ + MetastoreConfig.prototype.dataprocMetastoreService = ""; - /** - * Creates a new Metric instance using the specified properties. 
- * @function create - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @static - * @param {google.cloud.dataproc.v1.DataprocMetricConfig.IMetric=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.DataprocMetricConfig.Metric} Metric instance - */ - Metric.create = function create(properties) { - return new Metric(properties); - }; + /** + * Creates a new MetastoreConfig instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @static + * @param {google.cloud.dataproc.v1.IMetastoreConfig=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.MetastoreConfig} MetastoreConfig instance + */ + MetastoreConfig.create = function create(properties) { + return new MetastoreConfig(properties); + }; - /** - * Encodes the specified Metric message. Does not implicitly {@link google.cloud.dataproc.v1.DataprocMetricConfig.Metric.verify|verify} messages. - * @function encode - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @static - * @param {google.cloud.dataproc.v1.DataprocMetricConfig.IMetric} message Metric message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Metric.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.metricSource != null && Object.hasOwnProperty.call(message, "metricSource")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.metricSource); - if (message.metricOverrides != null && message.metricOverrides.length) - for (var i = 0; i < message.metricOverrides.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.metricOverrides[i]); - return writer; - }; + /** + * Encodes the specified MetastoreConfig message. Does not implicitly {@link google.cloud.dataproc.v1.MetastoreConfig.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @static + * @param {google.cloud.dataproc.v1.IMetastoreConfig} message MetastoreConfig message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + MetastoreConfig.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.dataprocMetastoreService != null && Object.hasOwnProperty.call(message, "dataprocMetastoreService")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.dataprocMetastoreService); + return writer; + }; - /** - * Encodes the specified Metric message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DataprocMetricConfig.Metric.verify|verify} messages. - * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @static - * @param {google.cloud.dataproc.v1.DataprocMetricConfig.IMetric} message Metric message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Metric.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * Encodes the specified MetastoreConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.MetastoreConfig.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @static + * @param {google.cloud.dataproc.v1.IMetastoreConfig} message MetastoreConfig message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + MetastoreConfig.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; - /** - * Decodes a Metric message from the specified reader or buffer. 
- * @function decode - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.DataprocMetricConfig.Metric} Metric - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Metric.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric(); - while (reader.pos < end) { - var tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.metricSource = reader.int32(); - break; - } - case 2: { - if (!(message.metricOverrides && message.metricOverrides.length)) - message.metricOverrides = []; - message.metricOverrides.push(reader.string()); - break; - } - default: - reader.skipType(tag & 7); + /** + * Decodes a MetastoreConfig message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.MetastoreConfig} MetastoreConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + MetastoreConfig.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.MetastoreConfig(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.dataprocMetastoreService = reader.string(); break; } + default: + reader.skipType(tag & 7); + break; } - return message; - }; + } + return message; + }; - /** - * Decodes a Metric message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.DataprocMetricConfig.Metric} Metric - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Metric.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + /** + * Decodes a MetastoreConfig message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.dataproc.v1.MetastoreConfig} MetastoreConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + MetastoreConfig.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; - /** - * Verifies a Metric message. 
- * @function verify - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - Metric.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.metricSource != null && message.hasOwnProperty("metricSource")) - switch (message.metricSource) { - default: - return "metricSource: enum value expected"; - case 0: - case 1: - case 2: - case 3: - case 4: - case 5: - case 6: - break; - } - if (message.metricOverrides != null && message.hasOwnProperty("metricOverrides")) { - if (!Array.isArray(message.metricOverrides)) - return "metricOverrides: array expected"; - for (var i = 0; i < message.metricOverrides.length; ++i) - if (!$util.isString(message.metricOverrides[i])) - return "metricOverrides: string[] expected"; - } - return null; - }; - - /** - * Creates a Metric message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @static - * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.DataprocMetricConfig.Metric} Metric - */ - Metric.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric) - return object; - var message = new $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric(); - switch (object.metricSource) { - default: - if (typeof object.metricSource === "number") { - message.metricSource = object.metricSource; - break; - } - break; - case "METRIC_SOURCE_UNSPECIFIED": - case 0: - message.metricSource = 0; - break; - case "MONITORING_AGENT_DEFAULTS": - case 1: - message.metricSource = 1; - break; - case "HDFS": - case 2: - message.metricSource = 2; - break; - case "SPARK": - case 3: - message.metricSource = 3; - break; - case "YARN": - case 4: - message.metricSource = 4; - break; - case "SPARK_HISTORY_SERVER": - case 5: - message.metricSource = 5; - break; - case "HIVESERVER2": - case 6: - message.metricSource = 6; - break; - } - if (object.metricOverrides) { - if (!Array.isArray(object.metricOverrides)) - throw TypeError(".google.cloud.dataproc.v1.DataprocMetricConfig.Metric.metricOverrides: array expected"); - message.metricOverrides = []; - for (var i = 0; i < object.metricOverrides.length; ++i) - message.metricOverrides[i] = String(object.metricOverrides[i]); - } - return message; - }; + /** + * Verifies a MetastoreConfig message. 
+ * @function verify + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + MetastoreConfig.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.dataprocMetastoreService != null && message.hasOwnProperty("dataprocMetastoreService")) + if (!$util.isString(message.dataprocMetastoreService)) + return "dataprocMetastoreService: string expected"; + return null; + }; - /** - * Creates a plain object from a Metric message. Also converts values to other types if specified. - * @function toObject - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @static - * @param {google.cloud.dataproc.v1.DataprocMetricConfig.Metric} message Metric - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - Metric.toObject = function toObject(message, options) { - if (!options) - options = {}; - var object = {}; - if (options.arrays || options.defaults) - object.metricOverrides = []; - if (options.defaults) - object.metricSource = options.enums === String ? "METRIC_SOURCE_UNSPECIFIED" : 0; - if (message.metricSource != null && message.hasOwnProperty("metricSource")) - object.metricSource = options.enums === String ? $root.google.cloud.dataproc.v1.DataprocMetricConfig.MetricSource[message.metricSource] === undefined ? message.metricSource : $root.google.cloud.dataproc.v1.DataprocMetricConfig.MetricSource[message.metricSource] : message.metricSource; - if (message.metricOverrides && message.metricOverrides.length) { - object.metricOverrides = []; - for (var j = 0; j < message.metricOverrides.length; ++j) - object.metricOverrides[j] = message.metricOverrides[j]; - } + /** + * Creates a MetastoreConfig message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.MetastoreConfig} MetastoreConfig + */ + MetastoreConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.MetastoreConfig) return object; - }; + var message = new $root.google.cloud.dataproc.v1.MetastoreConfig(); + if (object.dataprocMetastoreService != null) + message.dataprocMetastoreService = String(object.dataprocMetastoreService); + return message; + }; - /** - * Converts this Metric to JSON. - * @function toJSON - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @instance - * @returns {Object.} JSON object - */ - Metric.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + /** + * Creates a plain object from a MetastoreConfig message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @static + * @param {google.cloud.dataproc.v1.MetastoreConfig} message MetastoreConfig + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + MetastoreConfig.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) + object.dataprocMetastoreService = ""; + if (message.dataprocMetastoreService != null && message.hasOwnProperty("dataprocMetastoreService")) + object.dataprocMetastoreService = message.dataprocMetastoreService; + return object; + }; - /** - * Gets the default type url for Metric - * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - Metric.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/google.cloud.dataproc.v1.DataprocMetricConfig.Metric"; - }; + /** + * Converts this MetastoreConfig to JSON. 
+ * @function toJSON + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @instance + * @returns {Object.} JSON object + */ + MetastoreConfig.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - return Metric; - })(); + /** + * Gets the default type url for MetastoreConfig + * @function getTypeUrl + * @memberof google.cloud.dataproc.v1.MetastoreConfig + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + MetastoreConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.dataproc.v1.MetastoreConfig"; + }; - return DataprocMetricConfig; + return MetastoreConfig; })(); - v1.ClusterMetrics = (function() { + v1.DataprocMetricConfig = (function() { /** - * Properties of a ClusterMetrics. + * Properties of a DataprocMetricConfig. * @memberof google.cloud.dataproc.v1 - * @interface IClusterMetrics - * @property {Object.|null} [hdfsMetrics] ClusterMetrics hdfsMetrics - * @property {Object.|null} [yarnMetrics] ClusterMetrics yarnMetrics + * @interface IDataprocMetricConfig + * @property {Array.|null} [metrics] DataprocMetricConfig metrics */ /** - * Constructs a new ClusterMetrics. + * Constructs a new DataprocMetricConfig. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a ClusterMetrics. - * @implements IClusterMetrics + * @classdesc Represents a DataprocMetricConfig. 
+ * @implements IDataprocMetricConfig * @constructor - * @param {google.cloud.dataproc.v1.IClusterMetrics=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IDataprocMetricConfig=} [properties] Properties to set */ - function ClusterMetrics(properties) { - this.hdfsMetrics = {}; - this.yarnMetrics = {}; + function DataprocMetricConfig(properties) { + this.metrics = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -18694,129 +18806,78 @@ } /** - * ClusterMetrics hdfsMetrics. - * @member {Object.} hdfsMetrics - * @memberof google.cloud.dataproc.v1.ClusterMetrics - * @instance - */ - ClusterMetrics.prototype.hdfsMetrics = $util.emptyObject; - - /** - * ClusterMetrics yarnMetrics. - * @member {Object.} yarnMetrics - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * DataprocMetricConfig metrics. + * @member {Array.} metrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @instance */ - ClusterMetrics.prototype.yarnMetrics = $util.emptyObject; + DataprocMetricConfig.prototype.metrics = $util.emptyArray; /** - * Creates a new ClusterMetrics instance using the specified properties. + * Creates a new DataprocMetricConfig instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @static - * @param {google.cloud.dataproc.v1.IClusterMetrics=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.ClusterMetrics} ClusterMetrics instance + * @param {google.cloud.dataproc.v1.IDataprocMetricConfig=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.DataprocMetricConfig} DataprocMetricConfig instance */ - ClusterMetrics.create = function create(properties) { - return new ClusterMetrics(properties); + DataprocMetricConfig.create = function create(properties) { + return new DataprocMetricConfig(properties); }; /** - * Encodes the specified ClusterMetrics message. Does not implicitly {@link google.cloud.dataproc.v1.ClusterMetrics.verify|verify} messages. + * Encodes the specified DataprocMetricConfig message. Does not implicitly {@link google.cloud.dataproc.v1.DataprocMetricConfig.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @static - * @param {google.cloud.dataproc.v1.IClusterMetrics} message ClusterMetrics message or plain object to encode + * @param {google.cloud.dataproc.v1.IDataprocMetricConfig} message DataprocMetricConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ClusterMetrics.encode = function encode(message, writer) { + DataprocMetricConfig.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.hdfsMetrics != null && Object.hasOwnProperty.call(message, "hdfsMetrics")) - for (var keys = Object.keys(message.hdfsMetrics), i = 0; i < keys.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).int64(message.hdfsMetrics[keys[i]]).ldelim(); - if (message.yarnMetrics != null && Object.hasOwnProperty.call(message, "yarnMetrics")) - for (var keys = Object.keys(message.yarnMetrics), i = 0; i < keys.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).int64(message.yarnMetrics[keys[i]]).ldelim(); + if (message.metrics != null && message.metrics.length) + for (var i = 0; i < message.metrics.length; ++i) + $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric.encode(message.metrics[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ClusterMetrics message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ClusterMetrics.verify|verify} messages. + * Encodes the specified DataprocMetricConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DataprocMetricConfig.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @static - * @param {google.cloud.dataproc.v1.IClusterMetrics} message ClusterMetrics message or plain object to encode + * @param {google.cloud.dataproc.v1.IDataprocMetricConfig} message DataprocMetricConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ClusterMetrics.encodeDelimited = function encodeDelimited(message, writer) { + DataprocMetricConfig.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ClusterMetrics message from the specified reader or buffer. + * Decodes a DataprocMetricConfig message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.ClusterMetrics} ClusterMetrics + * @returns {google.cloud.dataproc.v1.DataprocMetricConfig} DataprocMetricConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterMetrics.decode = function decode(reader, length) { + DataprocMetricConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ClusterMetrics(), key, value; + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DataprocMetricConfig(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (message.hdfsMetrics === $util.emptyObject) - message.hdfsMetrics = {}; - var end2 = reader.uint32() + reader.pos; - key = ""; - value = 0; - while (reader.pos < end2) { - var tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = reader.int64(); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.hdfsMetrics[key] = value; - break; - } - case 2: { - if (message.yarnMetrics === $util.emptyObject) - message.yarnMetrics = {}; - var end2 = reader.uint32() + reader.pos; - key = ""; - value = 0; - while (reader.pos < end2) { - var tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = reader.int64(); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.yarnMetrics[key] = value; + if (!(message.metrics && message.metrics.length)) + message.metrics = []; + message.metrics.push($root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric.decode(reader, reader.uint32())); break; } default: @@ -18828,509 +18889,453 @@ }; /** - * Decodes a ClusterMetrics message from the specified reader or buffer, length delimited. + * Decodes a DataprocMetricConfig message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.ClusterMetrics} ClusterMetrics + * @returns {google.cloud.dataproc.v1.DataprocMetricConfig} DataprocMetricConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterMetrics.decodeDelimited = function decodeDelimited(reader) { + DataprocMetricConfig.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ClusterMetrics message. + * Verifies a DataprocMetricConfig message. * @function verify - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ClusterMetrics.verify = function verify(message) { + DataprocMetricConfig.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.hdfsMetrics != null && message.hasOwnProperty("hdfsMetrics")) { - if (!$util.isObject(message.hdfsMetrics)) - return "hdfsMetrics: object expected"; - var key = Object.keys(message.hdfsMetrics); - for (var i = 0; i < key.length; ++i) - if (!$util.isInteger(message.hdfsMetrics[key[i]]) && !(message.hdfsMetrics[key[i]] && $util.isInteger(message.hdfsMetrics[key[i]].low) && $util.isInteger(message.hdfsMetrics[key[i]].high))) - return "hdfsMetrics: integer|Long{k:string} expected"; - } - if (message.yarnMetrics != null && message.hasOwnProperty("yarnMetrics")) { - if (!$util.isObject(message.yarnMetrics)) - return "yarnMetrics: object expected"; - 
var key = Object.keys(message.yarnMetrics); - for (var i = 0; i < key.length; ++i) - if (!$util.isInteger(message.yarnMetrics[key[i]]) && !(message.yarnMetrics[key[i]] && $util.isInteger(message.yarnMetrics[key[i]].low) && $util.isInteger(message.yarnMetrics[key[i]].high))) - return "yarnMetrics: integer|Long{k:string} expected"; + if (message.metrics != null && message.hasOwnProperty("metrics")) { + if (!Array.isArray(message.metrics)) + return "metrics: array expected"; + for (var i = 0; i < message.metrics.length; ++i) { + var error = $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric.verify(message.metrics[i]); + if (error) + return "metrics." + error; + } } return null; }; /** - * Creates a ClusterMetrics message from a plain object. Also converts values to their respective internal types. + * Creates a DataprocMetricConfig message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.ClusterMetrics} ClusterMetrics + * @returns {google.cloud.dataproc.v1.DataprocMetricConfig} DataprocMetricConfig */ - ClusterMetrics.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.ClusterMetrics) + DataprocMetricConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.DataprocMetricConfig) return object; - var message = new $root.google.cloud.dataproc.v1.ClusterMetrics(); - if (object.hdfsMetrics) { - if (typeof object.hdfsMetrics !== "object") - throw TypeError(".google.cloud.dataproc.v1.ClusterMetrics.hdfsMetrics: object expected"); - message.hdfsMetrics = {}; - for (var keys = Object.keys(object.hdfsMetrics), i = 0; i < keys.length; ++i) - if ($util.Long) - (message.hdfsMetrics[keys[i]] = 
$util.Long.fromValue(object.hdfsMetrics[keys[i]])).unsigned = false; - else if (typeof object.hdfsMetrics[keys[i]] === "string") - message.hdfsMetrics[keys[i]] = parseInt(object.hdfsMetrics[keys[i]], 10); - else if (typeof object.hdfsMetrics[keys[i]] === "number") - message.hdfsMetrics[keys[i]] = object.hdfsMetrics[keys[i]]; - else if (typeof object.hdfsMetrics[keys[i]] === "object") - message.hdfsMetrics[keys[i]] = new $util.LongBits(object.hdfsMetrics[keys[i]].low >>> 0, object.hdfsMetrics[keys[i]].high >>> 0).toNumber(); - } - if (object.yarnMetrics) { - if (typeof object.yarnMetrics !== "object") - throw TypeError(".google.cloud.dataproc.v1.ClusterMetrics.yarnMetrics: object expected"); - message.yarnMetrics = {}; - for (var keys = Object.keys(object.yarnMetrics), i = 0; i < keys.length; ++i) - if ($util.Long) - (message.yarnMetrics[keys[i]] = $util.Long.fromValue(object.yarnMetrics[keys[i]])).unsigned = false; - else if (typeof object.yarnMetrics[keys[i]] === "string") - message.yarnMetrics[keys[i]] = parseInt(object.yarnMetrics[keys[i]], 10); - else if (typeof object.yarnMetrics[keys[i]] === "number") - message.yarnMetrics[keys[i]] = object.yarnMetrics[keys[i]]; - else if (typeof object.yarnMetrics[keys[i]] === "object") - message.yarnMetrics[keys[i]] = new $util.LongBits(object.yarnMetrics[keys[i]].low >>> 0, object.yarnMetrics[keys[i]].high >>> 0).toNumber(); + var message = new $root.google.cloud.dataproc.v1.DataprocMetricConfig(); + if (object.metrics) { + if (!Array.isArray(object.metrics)) + throw TypeError(".google.cloud.dataproc.v1.DataprocMetricConfig.metrics: array expected"); + message.metrics = []; + for (var i = 0; i < object.metrics.length; ++i) { + if (typeof object.metrics[i] !== "object") + throw TypeError(".google.cloud.dataproc.v1.DataprocMetricConfig.metrics: object expected"); + message.metrics[i] = $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric.fromObject(object.metrics[i]); + } } return message; }; /** - * Creates a plain 
object from a ClusterMetrics message. Also converts values to other types if specified. + * Creates a plain object from a DataprocMetricConfig message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @static - * @param {google.cloud.dataproc.v1.ClusterMetrics} message ClusterMetrics + * @param {google.cloud.dataproc.v1.DataprocMetricConfig} message DataprocMetricConfig * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ClusterMetrics.toObject = function toObject(message, options) { + DataprocMetricConfig.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.objects || options.defaults) { - object.hdfsMetrics = {}; - object.yarnMetrics = {}; - } - var keys2; - if (message.hdfsMetrics && (keys2 = Object.keys(message.hdfsMetrics)).length) { - object.hdfsMetrics = {}; - for (var j = 0; j < keys2.length; ++j) - if (typeof message.hdfsMetrics[keys2[j]] === "number") - object.hdfsMetrics[keys2[j]] = options.longs === String ? String(message.hdfsMetrics[keys2[j]]) : message.hdfsMetrics[keys2[j]]; - else - object.hdfsMetrics[keys2[j]] = options.longs === String ? $util.Long.prototype.toString.call(message.hdfsMetrics[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.hdfsMetrics[keys2[j]].low >>> 0, message.hdfsMetrics[keys2[j]].high >>> 0).toNumber() : message.hdfsMetrics[keys2[j]]; - } - if (message.yarnMetrics && (keys2 = Object.keys(message.yarnMetrics)).length) { - object.yarnMetrics = {}; - for (var j = 0; j < keys2.length; ++j) - if (typeof message.yarnMetrics[keys2[j]] === "number") - object.yarnMetrics[keys2[j]] = options.longs === String ? String(message.yarnMetrics[keys2[j]]) : message.yarnMetrics[keys2[j]]; - else - object.yarnMetrics[keys2[j]] = options.longs === String ? 
$util.Long.prototype.toString.call(message.yarnMetrics[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.yarnMetrics[keys2[j]].low >>> 0, message.yarnMetrics[keys2[j]].high >>> 0).toNumber() : message.yarnMetrics[keys2[j]]; + if (options.arrays || options.defaults) + object.metrics = []; + if (message.metrics && message.metrics.length) { + object.metrics = []; + for (var j = 0; j < message.metrics.length; ++j) + object.metrics[j] = $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric.toObject(message.metrics[j], options); } return object; }; /** - * Converts this ClusterMetrics to JSON. + * Converts this DataprocMetricConfig to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @instance * @returns {Object.} JSON object */ - ClusterMetrics.prototype.toJSON = function toJSON() { + DataprocMetricConfig.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ClusterMetrics + * Gets the default type url for DataprocMetricConfig * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.ClusterMetrics + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ClusterMetrics.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + DataprocMetricConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.ClusterMetrics"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.DataprocMetricConfig"; }; - return ClusterMetrics; - })(); - - v1.CreateClusterRequest = (function() { - /** - * Properties of a CreateClusterRequest. 
- * @memberof google.cloud.dataproc.v1 - * @interface ICreateClusterRequest - * @property {string|null} [projectId] CreateClusterRequest projectId - * @property {string|null} [region] CreateClusterRequest region - * @property {google.cloud.dataproc.v1.ICluster|null} [cluster] CreateClusterRequest cluster - * @property {string|null} [requestId] CreateClusterRequest requestId - * @property {google.cloud.dataproc.v1.FailureAction|null} [actionOnFailedPrimaryWorkers] CreateClusterRequest actionOnFailedPrimaryWorkers + * MetricSource enum. + * @name google.cloud.dataproc.v1.DataprocMetricConfig.MetricSource + * @enum {number} + * @property {number} METRIC_SOURCE_UNSPECIFIED=0 METRIC_SOURCE_UNSPECIFIED value + * @property {number} MONITORING_AGENT_DEFAULTS=1 MONITORING_AGENT_DEFAULTS value + * @property {number} HDFS=2 HDFS value + * @property {number} SPARK=3 SPARK value + * @property {number} YARN=4 YARN value + * @property {number} SPARK_HISTORY_SERVER=5 SPARK_HISTORY_SERVER value + * @property {number} HIVESERVER2=6 HIVESERVER2 value */ + DataprocMetricConfig.MetricSource = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "METRIC_SOURCE_UNSPECIFIED"] = 0; + values[valuesById[1] = "MONITORING_AGENT_DEFAULTS"] = 1; + values[valuesById[2] = "HDFS"] = 2; + values[valuesById[3] = "SPARK"] = 3; + values[valuesById[4] = "YARN"] = 4; + values[valuesById[5] = "SPARK_HISTORY_SERVER"] = 5; + values[valuesById[6] = "HIVESERVER2"] = 6; + return values; + })(); - /** - * Constructs a new CreateClusterRequest. - * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a CreateClusterRequest. 
- * @implements ICreateClusterRequest - * @constructor - * @param {google.cloud.dataproc.v1.ICreateClusterRequest=} [properties] Properties to set - */ - function CreateClusterRequest(properties) { - if (properties) - for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * CreateClusterRequest projectId. - * @member {string} projectId - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @instance - */ - CreateClusterRequest.prototype.projectId = ""; + DataprocMetricConfig.Metric = (function() { - /** - * CreateClusterRequest region. - * @member {string} region - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @instance - */ - CreateClusterRequest.prototype.region = ""; + /** + * Properties of a Metric. + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @interface IMetric + * @property {google.cloud.dataproc.v1.DataprocMetricConfig.MetricSource|null} [metricSource] Metric metricSource + * @property {Array.|null} [metricOverrides] Metric metricOverrides + */ - /** - * CreateClusterRequest cluster. - * @member {google.cloud.dataproc.v1.ICluster|null|undefined} cluster - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @instance - */ - CreateClusterRequest.prototype.cluster = null; + /** + * Constructs a new Metric. + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig + * @classdesc Represents a Metric. + * @implements IMetric + * @constructor + * @param {google.cloud.dataproc.v1.DataprocMetricConfig.IMetric=} [properties] Properties to set + */ + function Metric(properties) { + this.metricOverrides = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * CreateClusterRequest requestId. 
- * @member {string} requestId - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @instance - */ - CreateClusterRequest.prototype.requestId = ""; + /** + * Metric metricSource. + * @member {google.cloud.dataproc.v1.DataprocMetricConfig.MetricSource} metricSource + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @instance + */ + Metric.prototype.metricSource = 0; - /** - * CreateClusterRequest actionOnFailedPrimaryWorkers. - * @member {google.cloud.dataproc.v1.FailureAction} actionOnFailedPrimaryWorkers - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @instance - */ - CreateClusterRequest.prototype.actionOnFailedPrimaryWorkers = 0; + /** + * Metric metricOverrides. + * @member {Array.} metricOverrides + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @instance + */ + Metric.prototype.metricOverrides = $util.emptyArray; - /** - * Creates a new CreateClusterRequest instance using the specified properties. - * @function create - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @static - * @param {google.cloud.dataproc.v1.ICreateClusterRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.CreateClusterRequest} CreateClusterRequest instance - */ - CreateClusterRequest.create = function create(properties) { - return new CreateClusterRequest(properties); - }; + /** + * Creates a new Metric instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @static + * @param {google.cloud.dataproc.v1.DataprocMetricConfig.IMetric=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.DataprocMetricConfig.Metric} Metric instance + */ + Metric.create = function create(properties) { + return new Metric(properties); + }; - /** - * Encodes the specified CreateClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.CreateClusterRequest.verify|verify} messages. 
- * @function encode - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @static - * @param {google.cloud.dataproc.v1.ICreateClusterRequest} message CreateClusterRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - CreateClusterRequest.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.cluster != null && Object.hasOwnProperty.call(message, "cluster")) - $root.google.cloud.dataproc.v1.Cluster.encode(message.cluster, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); - if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.requestId); - if (message.actionOnFailedPrimaryWorkers != null && Object.hasOwnProperty.call(message, "actionOnFailedPrimaryWorkers")) - writer.uint32(/* id 5, wireType 0 =*/40).int32(message.actionOnFailedPrimaryWorkers); - return writer; - }; + /** + * Encodes the specified Metric message. Does not implicitly {@link google.cloud.dataproc.v1.DataprocMetricConfig.Metric.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @static + * @param {google.cloud.dataproc.v1.DataprocMetricConfig.IMetric} message Metric message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Metric.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.metricSource != null && Object.hasOwnProperty.call(message, "metricSource")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.metricSource); + if (message.metricOverrides != null && message.metricOverrides.length) + for (var i = 0; i < message.metricOverrides.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.metricOverrides[i]); + return writer; + }; - /** - * Encodes the specified CreateClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.CreateClusterRequest.verify|verify} messages. - * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @static - * @param {google.cloud.dataproc.v1.ICreateClusterRequest} message CreateClusterRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - CreateClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * Encodes the specified Metric message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DataprocMetricConfig.Metric.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @static + * @param {google.cloud.dataproc.v1.DataprocMetricConfig.IMetric} message Metric message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Metric.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; - /** - * Decodes a CreateClusterRequest message from the specified reader or buffer. - * @function decode - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.CreateClusterRequest} CreateClusterRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - CreateClusterRequest.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.CreateClusterRequest(); - while (reader.pos < end) { - var tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.projectId = reader.string(); - break; - } - case 3: { - message.region = reader.string(); - break; - } - case 2: { - message.cluster = $root.google.cloud.dataproc.v1.Cluster.decode(reader, reader.uint32()); - break; - } - case 4: { - message.requestId = reader.string(); - break; - } - case 5: { - message.actionOnFailedPrimaryWorkers = reader.int32(); + /** + * Decodes a Metric message from the specified reader or buffer. 
+ * @function decode + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.DataprocMetricConfig.Metric} Metric + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Metric.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.metricSource = reader.int32(); + break; + } + case 2: { + if (!(message.metricOverrides && message.metricOverrides.length)) + message.metricOverrides = []; + message.metricOverrides.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); break; } - default: - reader.skipType(tag & 7); - break; } - } - return message; - }; - - /** - * Decodes a CreateClusterRequest message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.CreateClusterRequest} CreateClusterRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - CreateClusterRequest.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + return message; + }; - /** - * Verifies a CreateClusterRequest message. 
- * @function verify - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - CreateClusterRequest.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.projectId != null && message.hasOwnProperty("projectId")) - if (!$util.isString(message.projectId)) - return "projectId: string expected"; - if (message.region != null && message.hasOwnProperty("region")) - if (!$util.isString(message.region)) - return "region: string expected"; - if (message.cluster != null && message.hasOwnProperty("cluster")) { - var error = $root.google.cloud.dataproc.v1.Cluster.verify(message.cluster); - if (error) - return "cluster." + error; - } - if (message.requestId != null && message.hasOwnProperty("requestId")) - if (!$util.isString(message.requestId)) - return "requestId: string expected"; - if (message.actionOnFailedPrimaryWorkers != null && message.hasOwnProperty("actionOnFailedPrimaryWorkers")) - switch (message.actionOnFailedPrimaryWorkers) { + /** + * Decodes a Metric message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.dataproc.v1.DataprocMetricConfig.Metric} Metric + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Metric.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Metric message. 
+ * @function verify + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Metric.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.metricSource != null && message.hasOwnProperty("metricSource")) + switch (message.metricSource) { + default: + return "metricSource: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + break; + } + if (message.metricOverrides != null && message.hasOwnProperty("metricOverrides")) { + if (!Array.isArray(message.metricOverrides)) + return "metricOverrides: array expected"; + for (var i = 0; i < message.metricOverrides.length; ++i) + if (!$util.isString(message.metricOverrides[i])) + return "metricOverrides: string[] expected"; + } + return null; + }; + + /** + * Creates a Metric message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.DataprocMetricConfig.Metric} Metric + */ + Metric.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric) + return object; + var message = new $root.google.cloud.dataproc.v1.DataprocMetricConfig.Metric(); + switch (object.metricSource) { default: - return "actionOnFailedPrimaryWorkers: enum value expected"; + if (typeof object.metricSource === "number") { + message.metricSource = object.metricSource; + break; + } + break; + case "METRIC_SOURCE_UNSPECIFIED": case 0: + message.metricSource = 0; + break; + case "MONITORING_AGENT_DEFAULTS": case 1: + message.metricSource = 1; + break; + case "HDFS": case 2: + message.metricSource = 2; + break; + case "SPARK": + case 3: + message.metricSource = 3; + break; + case "YARN": + case 4: + message.metricSource = 4; + break; + case "SPARK_HISTORY_SERVER": + case 5: + message.metricSource = 5; + break; + case "HIVESERVER2": + case 6: + message.metricSource = 6; break; } - return null; - }; + if (object.metricOverrides) { + if (!Array.isArray(object.metricOverrides)) + throw TypeError(".google.cloud.dataproc.v1.DataprocMetricConfig.Metric.metricOverrides: array expected"); + message.metricOverrides = []; + for (var i = 0; i < object.metricOverrides.length; ++i) + message.metricOverrides[i] = String(object.metricOverrides[i]); + } + return message; + }; - /** - * Creates a CreateClusterRequest message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @static - * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.CreateClusterRequest} CreateClusterRequest - */ - CreateClusterRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.CreateClusterRequest) + /** + * Creates a plain object from a Metric message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @static + * @param {google.cloud.dataproc.v1.DataprocMetricConfig.Metric} message Metric + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Metric.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) + object.metricOverrides = []; + if (options.defaults) + object.metricSource = options.enums === String ? "METRIC_SOURCE_UNSPECIFIED" : 0; + if (message.metricSource != null && message.hasOwnProperty("metricSource")) + object.metricSource = options.enums === String ? $root.google.cloud.dataproc.v1.DataprocMetricConfig.MetricSource[message.metricSource] === undefined ? 
message.metricSource : $root.google.cloud.dataproc.v1.DataprocMetricConfig.MetricSource[message.metricSource] : message.metricSource; + if (message.metricOverrides && message.metricOverrides.length) { + object.metricOverrides = []; + for (var j = 0; j < message.metricOverrides.length; ++j) + object.metricOverrides[j] = message.metricOverrides[j]; + } return object; - var message = new $root.google.cloud.dataproc.v1.CreateClusterRequest(); - if (object.projectId != null) - message.projectId = String(object.projectId); - if (object.region != null) - message.region = String(object.region); - if (object.cluster != null) { - if (typeof object.cluster !== "object") - throw TypeError(".google.cloud.dataproc.v1.CreateClusterRequest.cluster: object expected"); - message.cluster = $root.google.cloud.dataproc.v1.Cluster.fromObject(object.cluster); - } - if (object.requestId != null) - message.requestId = String(object.requestId); - switch (object.actionOnFailedPrimaryWorkers) { - default: - if (typeof object.actionOnFailedPrimaryWorkers === "number") { - message.actionOnFailedPrimaryWorkers = object.actionOnFailedPrimaryWorkers; - break; + }; + + /** + * Converts this Metric to JSON. 
+ * @function toJSON + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @instance + * @returns {Object.} JSON object + */ + Metric.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Metric + * @function getTypeUrl + * @memberof google.cloud.dataproc.v1.DataprocMetricConfig.Metric + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Metric.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; } - break; - case "FAILURE_ACTION_UNSPECIFIED": - case 0: - message.actionOnFailedPrimaryWorkers = 0; - break; - case "NO_ACTION": - case 1: - message.actionOnFailedPrimaryWorkers = 1; - break; - case "DELETE": - case 2: - message.actionOnFailedPrimaryWorkers = 2; - break; - } - return message; - }; + return typeUrlPrefix + "/google.cloud.dataproc.v1.DataprocMetricConfig.Metric"; + }; + + return Metric; + })(); + + return DataprocMetricConfig; + })(); + + v1.ClusterMetrics = (function() { /** - * Creates a plain object from a CreateClusterRequest message. Also converts values to other types if specified. - * @function toObject - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @static - * @param {google.cloud.dataproc.v1.CreateClusterRequest} message CreateClusterRequest - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - CreateClusterRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - var object = {}; - if (options.defaults) { - object.projectId = ""; - object.cluster = null; - object.region = ""; - object.requestId = ""; - object.actionOnFailedPrimaryWorkers = options.enums === String ? 
"FAILURE_ACTION_UNSPECIFIED" : 0; - } - if (message.projectId != null && message.hasOwnProperty("projectId")) - object.projectId = message.projectId; - if (message.cluster != null && message.hasOwnProperty("cluster")) - object.cluster = $root.google.cloud.dataproc.v1.Cluster.toObject(message.cluster, options); - if (message.region != null && message.hasOwnProperty("region")) - object.region = message.region; - if (message.requestId != null && message.hasOwnProperty("requestId")) - object.requestId = message.requestId; - if (message.actionOnFailedPrimaryWorkers != null && message.hasOwnProperty("actionOnFailedPrimaryWorkers")) - object.actionOnFailedPrimaryWorkers = options.enums === String ? $root.google.cloud.dataproc.v1.FailureAction[message.actionOnFailedPrimaryWorkers] === undefined ? message.actionOnFailedPrimaryWorkers : $root.google.cloud.dataproc.v1.FailureAction[message.actionOnFailedPrimaryWorkers] : message.actionOnFailedPrimaryWorkers; - return object; - }; - - /** - * Converts this CreateClusterRequest to JSON. - * @function toJSON - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @instance - * @returns {Object.} JSON object - */ - CreateClusterRequest.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for CreateClusterRequest - * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.CreateClusterRequest - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - CreateClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/google.cloud.dataproc.v1.CreateClusterRequest"; - }; - - return CreateClusterRequest; - })(); - - v1.UpdateClusterRequest = (function() { - - /** - * Properties of an UpdateClusterRequest. 
- * @memberof google.cloud.dataproc.v1 - * @interface IUpdateClusterRequest - * @property {string|null} [projectId] UpdateClusterRequest projectId - * @property {string|null} [region] UpdateClusterRequest region - * @property {string|null} [clusterName] UpdateClusterRequest clusterName - * @property {google.cloud.dataproc.v1.ICluster|null} [cluster] UpdateClusterRequest cluster - * @property {google.protobuf.IDuration|null} [gracefulDecommissionTimeout] UpdateClusterRequest gracefulDecommissionTimeout - * @property {google.protobuf.IFieldMask|null} [updateMask] UpdateClusterRequest updateMask - * @property {string|null} [requestId] UpdateClusterRequest requestId + * Properties of a ClusterMetrics. + * @memberof google.cloud.dataproc.v1 + * @interface IClusterMetrics + * @property {Object.|null} [hdfsMetrics] ClusterMetrics hdfsMetrics + * @property {Object.|null} [yarnMetrics] ClusterMetrics yarnMetrics */ /** - * Constructs a new UpdateClusterRequest. + * Constructs a new ClusterMetrics. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents an UpdateClusterRequest. - * @implements IUpdateClusterRequest + * @classdesc Represents a ClusterMetrics. + * @implements IClusterMetrics * @constructor - * @param {google.cloud.dataproc.v1.IUpdateClusterRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IClusterMetrics=} [properties] Properties to set */ - function UpdateClusterRequest(properties) { + function ClusterMetrics(properties) { + this.hdfsMetrics = {}; + this.yarnMetrics = {}; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -19338,159 +19343,129 @@ } /** - * UpdateClusterRequest projectId. - * @member {string} projectId - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest - * @instance - */ - UpdateClusterRequest.prototype.projectId = ""; - - /** - * UpdateClusterRequest region. 
- * @member {string} region - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest - * @instance - */ - UpdateClusterRequest.prototype.region = ""; - - /** - * UpdateClusterRequest clusterName. - * @member {string} clusterName - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest - * @instance - */ - UpdateClusterRequest.prototype.clusterName = ""; - - /** - * UpdateClusterRequest cluster. - * @member {google.cloud.dataproc.v1.ICluster|null|undefined} cluster - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest - * @instance - */ - UpdateClusterRequest.prototype.cluster = null; - - /** - * UpdateClusterRequest gracefulDecommissionTimeout. - * @member {google.protobuf.IDuration|null|undefined} gracefulDecommissionTimeout - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest - * @instance - */ - UpdateClusterRequest.prototype.gracefulDecommissionTimeout = null; - - /** - * UpdateClusterRequest updateMask. - * @member {google.protobuf.IFieldMask|null|undefined} updateMask - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * ClusterMetrics hdfsMetrics. + * @member {Object.} hdfsMetrics + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @instance */ - UpdateClusterRequest.prototype.updateMask = null; + ClusterMetrics.prototype.hdfsMetrics = $util.emptyObject; /** - * UpdateClusterRequest requestId. - * @member {string} requestId - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * ClusterMetrics yarnMetrics. + * @member {Object.} yarnMetrics + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @instance */ - UpdateClusterRequest.prototype.requestId = ""; + ClusterMetrics.prototype.yarnMetrics = $util.emptyObject; /** - * Creates a new UpdateClusterRequest instance using the specified properties. + * Creates a new ClusterMetrics instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @static - * @param {google.cloud.dataproc.v1.IUpdateClusterRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.UpdateClusterRequest} UpdateClusterRequest instance + * @param {google.cloud.dataproc.v1.IClusterMetrics=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.ClusterMetrics} ClusterMetrics instance */ - UpdateClusterRequest.create = function create(properties) { - return new UpdateClusterRequest(properties); + ClusterMetrics.create = function create(properties) { + return new ClusterMetrics(properties); }; /** - * Encodes the specified UpdateClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.UpdateClusterRequest.verify|verify} messages. + * Encodes the specified ClusterMetrics message. Does not implicitly {@link google.cloud.dataproc.v1.ClusterMetrics.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @static - * @param {google.cloud.dataproc.v1.IUpdateClusterRequest} message UpdateClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IClusterMetrics} message ClusterMetrics message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateClusterRequest.encode = function encode(message, writer) { + ClusterMetrics.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.clusterName); - if (message.cluster != null && Object.hasOwnProperty.call(message, "cluster")) - $root.google.cloud.dataproc.v1.Cluster.encode(message.cluster, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.updateMask != null && Object.hasOwnProperty.call(message, "updateMask")) - $root.google.protobuf.FieldMask.encode(message.updateMask, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.region); - if (message.gracefulDecommissionTimeout != null && Object.hasOwnProperty.call(message, "gracefulDecommissionTimeout")) - $root.google.protobuf.Duration.encode(message.gracefulDecommissionTimeout, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.requestId); + if (message.hdfsMetrics != null && 
Object.hasOwnProperty.call(message, "hdfsMetrics")) + for (var keys = Object.keys(message.hdfsMetrics), i = 0; i < keys.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).int64(message.hdfsMetrics[keys[i]]).ldelim(); + if (message.yarnMetrics != null && Object.hasOwnProperty.call(message, "yarnMetrics")) + for (var keys = Object.keys(message.yarnMetrics), i = 0; i < keys.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).int64(message.yarnMetrics[keys[i]]).ldelim(); return writer; }; /** - * Encodes the specified UpdateClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.UpdateClusterRequest.verify|verify} messages. + * Encodes the specified ClusterMetrics message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ClusterMetrics.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @static - * @param {google.cloud.dataproc.v1.IUpdateClusterRequest} message UpdateClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IClusterMetrics} message ClusterMetrics message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { + ClusterMetrics.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an UpdateClusterRequest message from the specified reader or buffer. + * Decodes a ClusterMetrics message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.UpdateClusterRequest} UpdateClusterRequest + * @returns {google.cloud.dataproc.v1.ClusterMetrics} ClusterMetrics * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateClusterRequest.decode = function decode(reader, length) { + ClusterMetrics.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.UpdateClusterRequest(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ClusterMetrics(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.projectId = reader.string(); - break; - } - case 5: { - message.region = reader.string(); + if (message.hdfsMetrics === $util.emptyObject) + message.hdfsMetrics = {}; + var end2 = reader.uint32() + reader.pos; + key = ""; + value = 0; + while (reader.pos < end2) { + var tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.int64(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.hdfsMetrics[key] = value; break; } case 2: { - message.clusterName = reader.string(); - break; - } - case 3: { - message.cluster = $root.google.cloud.dataproc.v1.Cluster.decode(reader, reader.uint32()); - break; - } - case 6: { - message.gracefulDecommissionTimeout = $root.google.protobuf.Duration.decode(reader, reader.uint32()); - break; - } - case 4: { - message.updateMask = 
$root.google.protobuf.FieldMask.decode(reader, reader.uint32()); - break; - } - case 7: { - message.requestId = reader.string(); + if (message.yarnMetrics === $util.emptyObject) + message.yarnMetrics = {}; + var end2 = reader.uint32() + reader.pos; + key = ""; + value = 0; + while (reader.pos < end2) { + var tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.int64(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.yarnMetrics[key] = value; break; } default: @@ -19502,190 +19477,182 @@ }; /** - * Decodes an UpdateClusterRequest message from the specified reader or buffer, length delimited. + * Decodes a ClusterMetrics message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.UpdateClusterRequest} UpdateClusterRequest + * @returns {google.cloud.dataproc.v1.ClusterMetrics} ClusterMetrics * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateClusterRequest.decodeDelimited = function decodeDelimited(reader) { + ClusterMetrics.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an UpdateClusterRequest message. + * Verifies a ClusterMetrics message. 
* @function verify - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - UpdateClusterRequest.verify = function verify(message) { + ClusterMetrics.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.projectId != null && message.hasOwnProperty("projectId")) - if (!$util.isString(message.projectId)) - return "projectId: string expected"; - if (message.region != null && message.hasOwnProperty("region")) - if (!$util.isString(message.region)) - return "region: string expected"; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - if (!$util.isString(message.clusterName)) - return "clusterName: string expected"; - if (message.cluster != null && message.hasOwnProperty("cluster")) { - var error = $root.google.cloud.dataproc.v1.Cluster.verify(message.cluster); - if (error) - return "cluster." + error; - } - if (message.gracefulDecommissionTimeout != null && message.hasOwnProperty("gracefulDecommissionTimeout")) { - var error = $root.google.protobuf.Duration.verify(message.gracefulDecommissionTimeout); - if (error) - return "gracefulDecommissionTimeout." 
+ error; + if (message.hdfsMetrics != null && message.hasOwnProperty("hdfsMetrics")) { + if (!$util.isObject(message.hdfsMetrics)) + return "hdfsMetrics: object expected"; + var key = Object.keys(message.hdfsMetrics); + for (var i = 0; i < key.length; ++i) + if (!$util.isInteger(message.hdfsMetrics[key[i]]) && !(message.hdfsMetrics[key[i]] && $util.isInteger(message.hdfsMetrics[key[i]].low) && $util.isInteger(message.hdfsMetrics[key[i]].high))) + return "hdfsMetrics: integer|Long{k:string} expected"; } - if (message.updateMask != null && message.hasOwnProperty("updateMask")) { - var error = $root.google.protobuf.FieldMask.verify(message.updateMask); - if (error) - return "updateMask." + error; + if (message.yarnMetrics != null && message.hasOwnProperty("yarnMetrics")) { + if (!$util.isObject(message.yarnMetrics)) + return "yarnMetrics: object expected"; + var key = Object.keys(message.yarnMetrics); + for (var i = 0; i < key.length; ++i) + if (!$util.isInteger(message.yarnMetrics[key[i]]) && !(message.yarnMetrics[key[i]] && $util.isInteger(message.yarnMetrics[key[i]].low) && $util.isInteger(message.yarnMetrics[key[i]].high))) + return "yarnMetrics: integer|Long{k:string} expected"; } - if (message.requestId != null && message.hasOwnProperty("requestId")) - if (!$util.isString(message.requestId)) - return "requestId: string expected"; return null; }; /** - * Creates an UpdateClusterRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ClusterMetrics message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.UpdateClusterRequest} UpdateClusterRequest + * @returns {google.cloud.dataproc.v1.ClusterMetrics} ClusterMetrics */ - UpdateClusterRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.UpdateClusterRequest) + ClusterMetrics.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.ClusterMetrics) return object; - var message = new $root.google.cloud.dataproc.v1.UpdateClusterRequest(); - if (object.projectId != null) - message.projectId = String(object.projectId); - if (object.region != null) - message.region = String(object.region); - if (object.clusterName != null) - message.clusterName = String(object.clusterName); - if (object.cluster != null) { - if (typeof object.cluster !== "object") - throw TypeError(".google.cloud.dataproc.v1.UpdateClusterRequest.cluster: object expected"); - message.cluster = $root.google.cloud.dataproc.v1.Cluster.fromObject(object.cluster); - } - if (object.gracefulDecommissionTimeout != null) { - if (typeof object.gracefulDecommissionTimeout !== "object") - throw TypeError(".google.cloud.dataproc.v1.UpdateClusterRequest.gracefulDecommissionTimeout: object expected"); - message.gracefulDecommissionTimeout = $root.google.protobuf.Duration.fromObject(object.gracefulDecommissionTimeout); + var message = new $root.google.cloud.dataproc.v1.ClusterMetrics(); + if (object.hdfsMetrics) { + if (typeof object.hdfsMetrics !== "object") + throw TypeError(".google.cloud.dataproc.v1.ClusterMetrics.hdfsMetrics: object expected"); + message.hdfsMetrics = {}; + for (var keys = Object.keys(object.hdfsMetrics), i = 0; i < keys.length; ++i) + if ($util.Long) + (message.hdfsMetrics[keys[i]] = 
$util.Long.fromValue(object.hdfsMetrics[keys[i]])).unsigned = false; + else if (typeof object.hdfsMetrics[keys[i]] === "string") + message.hdfsMetrics[keys[i]] = parseInt(object.hdfsMetrics[keys[i]], 10); + else if (typeof object.hdfsMetrics[keys[i]] === "number") + message.hdfsMetrics[keys[i]] = object.hdfsMetrics[keys[i]]; + else if (typeof object.hdfsMetrics[keys[i]] === "object") + message.hdfsMetrics[keys[i]] = new $util.LongBits(object.hdfsMetrics[keys[i]].low >>> 0, object.hdfsMetrics[keys[i]].high >>> 0).toNumber(); } - if (object.updateMask != null) { - if (typeof object.updateMask !== "object") - throw TypeError(".google.cloud.dataproc.v1.UpdateClusterRequest.updateMask: object expected"); - message.updateMask = $root.google.protobuf.FieldMask.fromObject(object.updateMask); + if (object.yarnMetrics) { + if (typeof object.yarnMetrics !== "object") + throw TypeError(".google.cloud.dataproc.v1.ClusterMetrics.yarnMetrics: object expected"); + message.yarnMetrics = {}; + for (var keys = Object.keys(object.yarnMetrics), i = 0; i < keys.length; ++i) + if ($util.Long) + (message.yarnMetrics[keys[i]] = $util.Long.fromValue(object.yarnMetrics[keys[i]])).unsigned = false; + else if (typeof object.yarnMetrics[keys[i]] === "string") + message.yarnMetrics[keys[i]] = parseInt(object.yarnMetrics[keys[i]], 10); + else if (typeof object.yarnMetrics[keys[i]] === "number") + message.yarnMetrics[keys[i]] = object.yarnMetrics[keys[i]]; + else if (typeof object.yarnMetrics[keys[i]] === "object") + message.yarnMetrics[keys[i]] = new $util.LongBits(object.yarnMetrics[keys[i]].low >>> 0, object.yarnMetrics[keys[i]].high >>> 0).toNumber(); } - if (object.requestId != null) - message.requestId = String(object.requestId); return message; }; /** - * Creates a plain object from an UpdateClusterRequest message. Also converts values to other types if specified. + * Creates a plain object from a ClusterMetrics message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @static - * @param {google.cloud.dataproc.v1.UpdateClusterRequest} message UpdateClusterRequest + * @param {google.cloud.dataproc.v1.ClusterMetrics} message ClusterMetrics * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - UpdateClusterRequest.toObject = function toObject(message, options) { + ClusterMetrics.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.defaults) { - object.projectId = ""; - object.clusterName = ""; - object.cluster = null; - object.updateMask = null; - object.region = ""; - object.gracefulDecommissionTimeout = null; - object.requestId = ""; + if (options.objects || options.defaults) { + object.hdfsMetrics = {}; + object.yarnMetrics = {}; + } + var keys2; + if (message.hdfsMetrics && (keys2 = Object.keys(message.hdfsMetrics)).length) { + object.hdfsMetrics = {}; + for (var j = 0; j < keys2.length; ++j) + if (typeof message.hdfsMetrics[keys2[j]] === "number") + object.hdfsMetrics[keys2[j]] = options.longs === String ? String(message.hdfsMetrics[keys2[j]]) : message.hdfsMetrics[keys2[j]]; + else + object.hdfsMetrics[keys2[j]] = options.longs === String ? $util.Long.prototype.toString.call(message.hdfsMetrics[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.hdfsMetrics[keys2[j]].low >>> 0, message.hdfsMetrics[keys2[j]].high >>> 0).toNumber() : message.hdfsMetrics[keys2[j]]; + } + if (message.yarnMetrics && (keys2 = Object.keys(message.yarnMetrics)).length) { + object.yarnMetrics = {}; + for (var j = 0; j < keys2.length; ++j) + if (typeof message.yarnMetrics[keys2[j]] === "number") + object.yarnMetrics[keys2[j]] = options.longs === String ? String(message.yarnMetrics[keys2[j]]) : message.yarnMetrics[keys2[j]]; + else + object.yarnMetrics[keys2[j]] = options.longs === String ? 
$util.Long.prototype.toString.call(message.yarnMetrics[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.yarnMetrics[keys2[j]].low >>> 0, message.yarnMetrics[keys2[j]].high >>> 0).toNumber() : message.yarnMetrics[keys2[j]]; } - if (message.projectId != null && message.hasOwnProperty("projectId")) - object.projectId = message.projectId; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - object.clusterName = message.clusterName; - if (message.cluster != null && message.hasOwnProperty("cluster")) - object.cluster = $root.google.cloud.dataproc.v1.Cluster.toObject(message.cluster, options); - if (message.updateMask != null && message.hasOwnProperty("updateMask")) - object.updateMask = $root.google.protobuf.FieldMask.toObject(message.updateMask, options); - if (message.region != null && message.hasOwnProperty("region")) - object.region = message.region; - if (message.gracefulDecommissionTimeout != null && message.hasOwnProperty("gracefulDecommissionTimeout")) - object.gracefulDecommissionTimeout = $root.google.protobuf.Duration.toObject(message.gracefulDecommissionTimeout, options); - if (message.requestId != null && message.hasOwnProperty("requestId")) - object.requestId = message.requestId; return object; }; /** - * Converts this UpdateClusterRequest to JSON. + * Converts this ClusterMetrics to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @instance * @returns {Object.} JSON object */ - UpdateClusterRequest.prototype.toJSON = function toJSON() { + ClusterMetrics.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for UpdateClusterRequest + * Gets the default type url for ClusterMetrics * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @memberof google.cloud.dataproc.v1.ClusterMetrics * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - UpdateClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ClusterMetrics.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.UpdateClusterRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.ClusterMetrics"; }; - return UpdateClusterRequest; + return ClusterMetrics; })(); - v1.StopClusterRequest = (function() { + v1.CreateClusterRequest = (function() { /** - * Properties of a StopClusterRequest. + * Properties of a CreateClusterRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IStopClusterRequest - * @property {string|null} [projectId] StopClusterRequest projectId - * @property {string|null} [region] StopClusterRequest region - * @property {string|null} [clusterName] StopClusterRequest clusterName - * @property {string|null} [clusterUuid] StopClusterRequest clusterUuid - * @property {string|null} [requestId] StopClusterRequest requestId + * @interface ICreateClusterRequest + * @property {string|null} [projectId] CreateClusterRequest projectId + * @property {string|null} [region] CreateClusterRequest region + * @property {google.cloud.dataproc.v1.ICluster|null} [cluster] CreateClusterRequest cluster + * @property {string|null} [requestId] CreateClusterRequest requestId + * @property {google.cloud.dataproc.v1.FailureAction|null} [actionOnFailedPrimaryWorkers] CreateClusterRequest actionOnFailedPrimaryWorkers */ /** - * Constructs a new StopClusterRequest. + * Constructs a new CreateClusterRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a StopClusterRequest. - * @implements IStopClusterRequest + * @classdesc Represents a CreateClusterRequest. + * @implements ICreateClusterRequest * @constructor - * @param {google.cloud.dataproc.v1.IStopClusterRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.ICreateClusterRequest=} [properties] Properties to set */ - function StopClusterRequest(properties) { + function CreateClusterRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -19693,110 +19660,110 @@ } /** - * StopClusterRequest projectId. + * CreateClusterRequest projectId. 
* @member {string} projectId - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @instance */ - StopClusterRequest.prototype.projectId = ""; + CreateClusterRequest.prototype.projectId = ""; /** - * StopClusterRequest region. + * CreateClusterRequest region. * @member {string} region - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @instance */ - StopClusterRequest.prototype.region = ""; + CreateClusterRequest.prototype.region = ""; /** - * StopClusterRequest clusterName. - * @member {string} clusterName - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * CreateClusterRequest cluster. + * @member {google.cloud.dataproc.v1.ICluster|null|undefined} cluster + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @instance */ - StopClusterRequest.prototype.clusterName = ""; + CreateClusterRequest.prototype.cluster = null; /** - * StopClusterRequest clusterUuid. - * @member {string} clusterUuid - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * CreateClusterRequest requestId. + * @member {string} requestId + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @instance */ - StopClusterRequest.prototype.clusterUuid = ""; + CreateClusterRequest.prototype.requestId = ""; /** - * StopClusterRequest requestId. - * @member {string} requestId - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * CreateClusterRequest actionOnFailedPrimaryWorkers. + * @member {google.cloud.dataproc.v1.FailureAction} actionOnFailedPrimaryWorkers + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @instance */ - StopClusterRequest.prototype.requestId = ""; + CreateClusterRequest.prototype.actionOnFailedPrimaryWorkers = 0; /** - * Creates a new StopClusterRequest instance using the specified properties. + * Creates a new CreateClusterRequest instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @static - * @param {google.cloud.dataproc.v1.IStopClusterRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.StopClusterRequest} StopClusterRequest instance + * @param {google.cloud.dataproc.v1.ICreateClusterRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.CreateClusterRequest} CreateClusterRequest instance */ - StopClusterRequest.create = function create(properties) { - return new StopClusterRequest(properties); + CreateClusterRequest.create = function create(properties) { + return new CreateClusterRequest(properties); }; /** - * Encodes the specified StopClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.StopClusterRequest.verify|verify} messages. + * Encodes the specified CreateClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.CreateClusterRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @static - * @param {google.cloud.dataproc.v1.IStopClusterRequest} message StopClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.ICreateClusterRequest} message CreateClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StopClusterRequest.encode = function encode(message, writer) { + CreateClusterRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); + if (message.cluster != null && Object.hasOwnProperty.call(message, "cluster")) + $root.google.cloud.dataproc.v1.Cluster.encode(message.cluster, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.region); - if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.clusterName); - if (message.clusterUuid != null && Object.hasOwnProperty.call(message, "clusterUuid")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.clusterUuid); + writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.requestId); + writer.uint32(/* id 4, wireType 2 =*/34).string(message.requestId); + if (message.actionOnFailedPrimaryWorkers != null && Object.hasOwnProperty.call(message, "actionOnFailedPrimaryWorkers")) + writer.uint32(/* id 5, wireType 0 =*/40).int32(message.actionOnFailedPrimaryWorkers); return writer; }; 
/** - * Encodes the specified StopClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.StopClusterRequest.verify|verify} messages. + * Encodes the specified CreateClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.CreateClusterRequest.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @static - * @param {google.cloud.dataproc.v1.IStopClusterRequest} message StopClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.ICreateClusterRequest} message CreateClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StopClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { + CreateClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StopClusterRequest message from the specified reader or buffer. + * Decodes a CreateClusterRequest message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.StopClusterRequest} StopClusterRequest + * @returns {google.cloud.dataproc.v1.CreateClusterRequest} CreateClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StopClusterRequest.decode = function decode(reader, length) { + CreateClusterRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.StopClusterRequest(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.CreateClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { @@ -19804,20 +19771,20 @@ message.projectId = reader.string(); break; } - case 2: { + case 3: { message.region = reader.string(); break; } - case 3: { - message.clusterName = reader.string(); + case 2: { + message.cluster = $root.google.cloud.dataproc.v1.Cluster.decode(reader, reader.uint32()); break; } case 4: { - message.clusterUuid = reader.string(); + message.requestId = reader.string(); break; } case 5: { - message.requestId = reader.string(); + message.actionOnFailedPrimaryWorkers = reader.int32(); break; } default: @@ -19829,30 +19796,30 @@ }; /** - * Decodes a StopClusterRequest message from the specified reader or buffer, length delimited. + * Decodes a CreateClusterRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.StopClusterRequest} StopClusterRequest + * @returns {google.cloud.dataproc.v1.CreateClusterRequest} CreateClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StopClusterRequest.decodeDelimited = function decodeDelimited(reader) { + CreateClusterRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StopClusterRequest message. + * Verifies a CreateClusterRequest message. * @function verify - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StopClusterRequest.verify = function verify(message) { + CreateClusterRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.projectId != null && message.hasOwnProperty("projectId")) @@ -19861,127 +19828,158 @@ if (message.region != null && message.hasOwnProperty("region")) if (!$util.isString(message.region)) return "region: string expected"; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - if (!$util.isString(message.clusterName)) - return "clusterName: string expected"; - if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) - if (!$util.isString(message.clusterUuid)) - return "clusterUuid: string expected"; + if (message.cluster != null && message.hasOwnProperty("cluster")) { + var error = 
$root.google.cloud.dataproc.v1.Cluster.verify(message.cluster); + if (error) + return "cluster." + error; + } if (message.requestId != null && message.hasOwnProperty("requestId")) if (!$util.isString(message.requestId)) return "requestId: string expected"; + if (message.actionOnFailedPrimaryWorkers != null && message.hasOwnProperty("actionOnFailedPrimaryWorkers")) + switch (message.actionOnFailedPrimaryWorkers) { + default: + return "actionOnFailedPrimaryWorkers: enum value expected"; + case 0: + case 1: + case 2: + break; + } return null; }; /** - * Creates a StopClusterRequest message from a plain object. Also converts values to their respective internal types. + * Creates a CreateClusterRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.StopClusterRequest} StopClusterRequest + * @returns {google.cloud.dataproc.v1.CreateClusterRequest} CreateClusterRequest */ - StopClusterRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.StopClusterRequest) + CreateClusterRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.CreateClusterRequest) return object; - var message = new $root.google.cloud.dataproc.v1.StopClusterRequest(); + var message = new $root.google.cloud.dataproc.v1.CreateClusterRequest(); if (object.projectId != null) message.projectId = String(object.projectId); if (object.region != null) message.region = String(object.region); - if (object.clusterName != null) - message.clusterName = String(object.clusterName); - if (object.clusterUuid != null) - message.clusterUuid = String(object.clusterUuid); + if (object.cluster != null) { + if (typeof object.cluster !== "object") + throw 
TypeError(".google.cloud.dataproc.v1.CreateClusterRequest.cluster: object expected"); + message.cluster = $root.google.cloud.dataproc.v1.Cluster.fromObject(object.cluster); + } if (object.requestId != null) message.requestId = String(object.requestId); + switch (object.actionOnFailedPrimaryWorkers) { + default: + if (typeof object.actionOnFailedPrimaryWorkers === "number") { + message.actionOnFailedPrimaryWorkers = object.actionOnFailedPrimaryWorkers; + break; + } + break; + case "FAILURE_ACTION_UNSPECIFIED": + case 0: + message.actionOnFailedPrimaryWorkers = 0; + break; + case "NO_ACTION": + case 1: + message.actionOnFailedPrimaryWorkers = 1; + break; + case "DELETE": + case 2: + message.actionOnFailedPrimaryWorkers = 2; + break; + } return message; }; /** - * Creates a plain object from a StopClusterRequest message. Also converts values to other types if specified. + * Creates a plain object from a CreateClusterRequest message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @static - * @param {google.cloud.dataproc.v1.StopClusterRequest} message StopClusterRequest + * @param {google.cloud.dataproc.v1.CreateClusterRequest} message CreateClusterRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StopClusterRequest.toObject = function toObject(message, options) { + CreateClusterRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { object.projectId = ""; + object.cluster = null; object.region = ""; - object.clusterName = ""; - object.clusterUuid = ""; object.requestId = ""; + object.actionOnFailedPrimaryWorkers = options.enums === String ? 
"FAILURE_ACTION_UNSPECIFIED" : 0; } if (message.projectId != null && message.hasOwnProperty("projectId")) object.projectId = message.projectId; + if (message.cluster != null && message.hasOwnProperty("cluster")) + object.cluster = $root.google.cloud.dataproc.v1.Cluster.toObject(message.cluster, options); if (message.region != null && message.hasOwnProperty("region")) object.region = message.region; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - object.clusterName = message.clusterName; - if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) - object.clusterUuid = message.clusterUuid; if (message.requestId != null && message.hasOwnProperty("requestId")) object.requestId = message.requestId; + if (message.actionOnFailedPrimaryWorkers != null && message.hasOwnProperty("actionOnFailedPrimaryWorkers")) + object.actionOnFailedPrimaryWorkers = options.enums === String ? $root.google.cloud.dataproc.v1.FailureAction[message.actionOnFailedPrimaryWorkers] === undefined ? message.actionOnFailedPrimaryWorkers : $root.google.cloud.dataproc.v1.FailureAction[message.actionOnFailedPrimaryWorkers] : message.actionOnFailedPrimaryWorkers; return object; }; /** - * Converts this StopClusterRequest to JSON. + * Converts this CreateClusterRequest to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @instance * @returns {Object.} JSON object */ - StopClusterRequest.prototype.toJSON = function toJSON() { + CreateClusterRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StopClusterRequest + * Gets the default type url for CreateClusterRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.StopClusterRequest + * @memberof google.cloud.dataproc.v1.CreateClusterRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StopClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CreateClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.StopClusterRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.CreateClusterRequest"; }; - return StopClusterRequest; + return CreateClusterRequest; })(); - v1.StartClusterRequest = (function() { + v1.UpdateClusterRequest = (function() { /** - * Properties of a StartClusterRequest. + * Properties of an UpdateClusterRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IStartClusterRequest - * @property {string|null} [projectId] StartClusterRequest projectId - * @property {string|null} [region] StartClusterRequest region - * @property {string|null} [clusterName] StartClusterRequest clusterName - * @property {string|null} [clusterUuid] StartClusterRequest clusterUuid - * @property {string|null} [requestId] StartClusterRequest requestId + * @interface IUpdateClusterRequest + * @property {string|null} [projectId] UpdateClusterRequest projectId + * @property {string|null} [region] UpdateClusterRequest region + * @property {string|null} [clusterName] UpdateClusterRequest clusterName + * @property {google.cloud.dataproc.v1.ICluster|null} [cluster] UpdateClusterRequest cluster + * @property {google.protobuf.IDuration|null} [gracefulDecommissionTimeout] UpdateClusterRequest gracefulDecommissionTimeout + * @property {google.protobuf.IFieldMask|null} [updateMask] UpdateClusterRequest updateMask + * @property {string|null} [requestId] UpdateClusterRequest requestId */ /** - * Constructs a new StartClusterRequest. + * Constructs a new UpdateClusterRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a StartClusterRequest. - * @implements IStartClusterRequest + * @classdesc Represents an UpdateClusterRequest. + * @implements IUpdateClusterRequest * @constructor - * @param {google.cloud.dataproc.v1.IStartClusterRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IUpdateClusterRequest=} [properties] Properties to set */ - function StartClusterRequest(properties) { + function UpdateClusterRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -19989,110 +19987,130 @@ } /** - * StartClusterRequest projectId. + * UpdateClusterRequest projectId. 
* @member {string} projectId - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @instance */ - StartClusterRequest.prototype.projectId = ""; + UpdateClusterRequest.prototype.projectId = ""; /** - * StartClusterRequest region. + * UpdateClusterRequest region. * @member {string} region - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @instance */ - StartClusterRequest.prototype.region = ""; + UpdateClusterRequest.prototype.region = ""; /** - * StartClusterRequest clusterName. + * UpdateClusterRequest clusterName. * @member {string} clusterName - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @instance */ - StartClusterRequest.prototype.clusterName = ""; + UpdateClusterRequest.prototype.clusterName = ""; /** - * StartClusterRequest clusterUuid. - * @member {string} clusterUuid - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * UpdateClusterRequest cluster. + * @member {google.cloud.dataproc.v1.ICluster|null|undefined} cluster + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @instance */ - StartClusterRequest.prototype.clusterUuid = ""; + UpdateClusterRequest.prototype.cluster = null; /** - * StartClusterRequest requestId. + * UpdateClusterRequest gracefulDecommissionTimeout. + * @member {google.protobuf.IDuration|null|undefined} gracefulDecommissionTimeout + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @instance + */ + UpdateClusterRequest.prototype.gracefulDecommissionTimeout = null; + + /** + * UpdateClusterRequest updateMask. + * @member {google.protobuf.IFieldMask|null|undefined} updateMask + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest + * @instance + */ + UpdateClusterRequest.prototype.updateMask = null; + + /** + * UpdateClusterRequest requestId. 
* @member {string} requestId - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @instance */ - StartClusterRequest.prototype.requestId = ""; + UpdateClusterRequest.prototype.requestId = ""; /** - * Creates a new StartClusterRequest instance using the specified properties. + * Creates a new UpdateClusterRequest instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @static - * @param {google.cloud.dataproc.v1.IStartClusterRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.StartClusterRequest} StartClusterRequest instance + * @param {google.cloud.dataproc.v1.IUpdateClusterRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.UpdateClusterRequest} UpdateClusterRequest instance */ - StartClusterRequest.create = function create(properties) { - return new StartClusterRequest(properties); + UpdateClusterRequest.create = function create(properties) { + return new UpdateClusterRequest(properties); }; /** - * Encodes the specified StartClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.StartClusterRequest.verify|verify} messages. + * Encodes the specified UpdateClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.UpdateClusterRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @static - * @param {google.cloud.dataproc.v1.IStartClusterRequest} message StartClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IUpdateClusterRequest} message UpdateClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StartClusterRequest.encode = function encode(message, writer) { + UpdateClusterRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.region); if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.clusterName); - if (message.clusterUuid != null && Object.hasOwnProperty.call(message, "clusterUuid")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.clusterUuid); + writer.uint32(/* id 2, wireType 2 =*/18).string(message.clusterName); + if (message.cluster != null && Object.hasOwnProperty.call(message, "cluster")) + $root.google.cloud.dataproc.v1.Cluster.encode(message.cluster, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.updateMask != null && Object.hasOwnProperty.call(message, "updateMask")) + $root.google.protobuf.FieldMask.encode(message.updateMask, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.region != null && Object.hasOwnProperty.call(message, "region")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.region); + if (message.gracefulDecommissionTimeout != null && Object.hasOwnProperty.call(message, 
"gracefulDecommissionTimeout")) + $root.google.protobuf.Duration.encode(message.gracefulDecommissionTimeout, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.requestId); + writer.uint32(/* id 7, wireType 2 =*/58).string(message.requestId); return writer; }; /** - * Encodes the specified StartClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.StartClusterRequest.verify|verify} messages. + * Encodes the specified UpdateClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.UpdateClusterRequest.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @static - * @param {google.cloud.dataproc.v1.IStartClusterRequest} message StartClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IUpdateClusterRequest} message UpdateClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StartClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { + UpdateClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StartClusterRequest message from the specified reader or buffer. + * Decodes an UpdateClusterRequest message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.StartClusterRequest} StartClusterRequest + * @returns {google.cloud.dataproc.v1.UpdateClusterRequest} UpdateClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StartClusterRequest.decode = function decode(reader, length) { + UpdateClusterRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.StartClusterRequest(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.UpdateClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { @@ -20100,19 +20118,27 @@ message.projectId = reader.string(); break; } - case 2: { + case 5: { message.region = reader.string(); break; } - case 3: { + case 2: { message.clusterName = reader.string(); break; } + case 3: { + message.cluster = $root.google.cloud.dataproc.v1.Cluster.decode(reader, reader.uint32()); + break; + } + case 6: { + message.gracefulDecommissionTimeout = $root.google.protobuf.Duration.decode(reader, reader.uint32()); + break; + } case 4: { - message.clusterUuid = reader.string(); + message.updateMask = $root.google.protobuf.FieldMask.decode(reader, reader.uint32()); break; } - case 5: { + case 7: { message.requestId = reader.string(); break; } @@ -20125,30 +20151,30 @@ }; /** - * Decodes a StartClusterRequest message from the specified reader or buffer, length delimited. 
+ * Decodes an UpdateClusterRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.StartClusterRequest} StartClusterRequest + * @returns {google.cloud.dataproc.v1.UpdateClusterRequest} UpdateClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StartClusterRequest.decodeDelimited = function decodeDelimited(reader) { + UpdateClusterRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StartClusterRequest message. + * Verifies an UpdateClusterRequest message. * @function verify - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StartClusterRequest.verify = function verify(message) { + UpdateClusterRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.projectId != null && message.hasOwnProperty("projectId")) @@ -20160,9 +20186,21 @@ if (message.clusterName != null && message.hasOwnProperty("clusterName")) if (!$util.isString(message.clusterName)) return "clusterName: string expected"; - if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) - if (!$util.isString(message.clusterUuid)) - return "clusterUuid: string expected"; + if (message.cluster != null && message.hasOwnProperty("cluster")) { + var error = 
$root.google.cloud.dataproc.v1.Cluster.verify(message.cluster); + if (error) + return "cluster." + error; + } + if (message.gracefulDecommissionTimeout != null && message.hasOwnProperty("gracefulDecommissionTimeout")) { + var error = $root.google.protobuf.Duration.verify(message.gracefulDecommissionTimeout); + if (error) + return "gracefulDecommissionTimeout." + error; + } + if (message.updateMask != null && message.hasOwnProperty("updateMask")) { + var error = $root.google.protobuf.FieldMask.verify(message.updateMask); + if (error) + return "updateMask." + error; + } if (message.requestId != null && message.hasOwnProperty("requestId")) if (!$util.isString(message.requestId)) return "requestId: string expected"; @@ -20170,114 +20208,133 @@ }; /** - * Creates a StartClusterRequest message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateClusterRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.StartClusterRequest} StartClusterRequest + * @returns {google.cloud.dataproc.v1.UpdateClusterRequest} UpdateClusterRequest */ - StartClusterRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.StartClusterRequest) + UpdateClusterRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.UpdateClusterRequest) return object; - var message = new $root.google.cloud.dataproc.v1.StartClusterRequest(); + var message = new $root.google.cloud.dataproc.v1.UpdateClusterRequest(); if (object.projectId != null) message.projectId = String(object.projectId); if (object.region != null) message.region = String(object.region); if (object.clusterName != null) message.clusterName = 
String(object.clusterName); - if (object.clusterUuid != null) - message.clusterUuid = String(object.clusterUuid); + if (object.cluster != null) { + if (typeof object.cluster !== "object") + throw TypeError(".google.cloud.dataproc.v1.UpdateClusterRequest.cluster: object expected"); + message.cluster = $root.google.cloud.dataproc.v1.Cluster.fromObject(object.cluster); + } + if (object.gracefulDecommissionTimeout != null) { + if (typeof object.gracefulDecommissionTimeout !== "object") + throw TypeError(".google.cloud.dataproc.v1.UpdateClusterRequest.gracefulDecommissionTimeout: object expected"); + message.gracefulDecommissionTimeout = $root.google.protobuf.Duration.fromObject(object.gracefulDecommissionTimeout); + } + if (object.updateMask != null) { + if (typeof object.updateMask !== "object") + throw TypeError(".google.cloud.dataproc.v1.UpdateClusterRequest.updateMask: object expected"); + message.updateMask = $root.google.protobuf.FieldMask.fromObject(object.updateMask); + } if (object.requestId != null) message.requestId = String(object.requestId); return message; }; /** - * Creates a plain object from a StartClusterRequest message. Also converts values to other types if specified. + * Creates a plain object from an UpdateClusterRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @static - * @param {google.cloud.dataproc.v1.StartClusterRequest} message StartClusterRequest + * @param {google.cloud.dataproc.v1.UpdateClusterRequest} message UpdateClusterRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StartClusterRequest.toObject = function toObject(message, options) { + UpdateClusterRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { object.projectId = ""; - object.region = ""; object.clusterName = ""; - object.clusterUuid = ""; + object.cluster = null; + object.updateMask = null; + object.region = ""; + object.gracefulDecommissionTimeout = null; object.requestId = ""; } if (message.projectId != null && message.hasOwnProperty("projectId")) object.projectId = message.projectId; - if (message.region != null && message.hasOwnProperty("region")) - object.region = message.region; if (message.clusterName != null && message.hasOwnProperty("clusterName")) object.clusterName = message.clusterName; - if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) - object.clusterUuid = message.clusterUuid; + if (message.cluster != null && message.hasOwnProperty("cluster")) + object.cluster = $root.google.cloud.dataproc.v1.Cluster.toObject(message.cluster, options); + if (message.updateMask != null && message.hasOwnProperty("updateMask")) + object.updateMask = $root.google.protobuf.FieldMask.toObject(message.updateMask, options); + if (message.region != null && message.hasOwnProperty("region")) + object.region = message.region; + if (message.gracefulDecommissionTimeout != null && message.hasOwnProperty("gracefulDecommissionTimeout")) + object.gracefulDecommissionTimeout = $root.google.protobuf.Duration.toObject(message.gracefulDecommissionTimeout, options); if 
(message.requestId != null && message.hasOwnProperty("requestId")) object.requestId = message.requestId; return object; }; /** - * Converts this StartClusterRequest to JSON. + * Converts this UpdateClusterRequest to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @instance * @returns {Object.} JSON object */ - StartClusterRequest.prototype.toJSON = function toJSON() { + UpdateClusterRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StartClusterRequest + * Gets the default type url for UpdateClusterRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @memberof google.cloud.dataproc.v1.UpdateClusterRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StartClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + UpdateClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.StartClusterRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.UpdateClusterRequest"; }; - return StartClusterRequest; + return UpdateClusterRequest; })(); - v1.DeleteClusterRequest = (function() { + v1.StopClusterRequest = (function() { /** - * Properties of a DeleteClusterRequest. + * Properties of a StopClusterRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IDeleteClusterRequest - * @property {string|null} [projectId] DeleteClusterRequest projectId - * @property {string|null} [region] DeleteClusterRequest region - * @property {string|null} [clusterName] DeleteClusterRequest clusterName - * @property {string|null} [clusterUuid] DeleteClusterRequest clusterUuid - * @property {string|null} [requestId] DeleteClusterRequest requestId + * @interface IStopClusterRequest + * @property {string|null} [projectId] StopClusterRequest projectId + * @property {string|null} [region] StopClusterRequest region + * @property {string|null} [clusterName] StopClusterRequest clusterName + * @property {string|null} [clusterUuid] StopClusterRequest clusterUuid + * @property {string|null} [requestId] StopClusterRequest requestId */ /** - * Constructs a new DeleteClusterRequest. + * Constructs a new StopClusterRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a DeleteClusterRequest. - * @implements IDeleteClusterRequest + * @classdesc Represents a StopClusterRequest. + * @implements IStopClusterRequest * @constructor - * @param {google.cloud.dataproc.v1.IDeleteClusterRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IStopClusterRequest=} [properties] Properties to set */ - function DeleteClusterRequest(properties) { + function StopClusterRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -20285,75 +20342,75 @@ } /** - * DeleteClusterRequest projectId. + * StopClusterRequest projectId. * @member {string} projectId - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @instance */ - DeleteClusterRequest.prototype.projectId = ""; + StopClusterRequest.prototype.projectId = ""; /** - * DeleteClusterRequest region. + * StopClusterRequest region. 
* @member {string} region - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @instance */ - DeleteClusterRequest.prototype.region = ""; + StopClusterRequest.prototype.region = ""; /** - * DeleteClusterRequest clusterName. + * StopClusterRequest clusterName. * @member {string} clusterName - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @instance */ - DeleteClusterRequest.prototype.clusterName = ""; + StopClusterRequest.prototype.clusterName = ""; /** - * DeleteClusterRequest clusterUuid. + * StopClusterRequest clusterUuid. * @member {string} clusterUuid - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @instance */ - DeleteClusterRequest.prototype.clusterUuid = ""; + StopClusterRequest.prototype.clusterUuid = ""; /** - * DeleteClusterRequest requestId. + * StopClusterRequest requestId. * @member {string} requestId - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @instance */ - DeleteClusterRequest.prototype.requestId = ""; + StopClusterRequest.prototype.requestId = ""; /** - * Creates a new DeleteClusterRequest instance using the specified properties. + * Creates a new StopClusterRequest instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @static - * @param {google.cloud.dataproc.v1.IDeleteClusterRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.DeleteClusterRequest} DeleteClusterRequest instance + * @param {google.cloud.dataproc.v1.IStopClusterRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.StopClusterRequest} StopClusterRequest instance */ - DeleteClusterRequest.create = function create(properties) { - return new DeleteClusterRequest(properties); + StopClusterRequest.create = function create(properties) { + return new StopClusterRequest(properties); }; /** - * Encodes the specified DeleteClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.DeleteClusterRequest.verify|verify} messages. + * Encodes the specified StopClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.StopClusterRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @static - * @param {google.cloud.dataproc.v1.IDeleteClusterRequest} message DeleteClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IStopClusterRequest} message StopClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteClusterRequest.encode = function encode(message, writer) { + StopClusterRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.clusterName); if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); + writer.uint32(/* id 2, wireType 2 =*/18).string(message.region); + if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.clusterName); if (message.clusterUuid != null && Object.hasOwnProperty.call(message, "clusterUuid")) writer.uint32(/* id 4, wireType 2 =*/34).string(message.clusterUuid); if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) @@ -20362,33 +20419,33 @@ }; /** - * Encodes the specified DeleteClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DeleteClusterRequest.verify|verify} messages. + * Encodes the specified StopClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.StopClusterRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @static - * @param {google.cloud.dataproc.v1.IDeleteClusterRequest} message DeleteClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IStopClusterRequest} message StopClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { + StopClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteClusterRequest message from the specified reader or buffer. + * Decodes a StopClusterRequest message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.DeleteClusterRequest} DeleteClusterRequest + * @returns {google.cloud.dataproc.v1.StopClusterRequest} StopClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteClusterRequest.decode = function decode(reader, length) { + StopClusterRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DeleteClusterRequest(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.StopClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { @@ -20396,11 +20453,11 @@ message.projectId = reader.string(); break; } - case 3: { + case 2: { message.region = reader.string(); break; } - case 2: { + case 3: { message.clusterName = reader.string(); break; } @@ -20421,30 +20478,30 @@ }; /** - * Decodes a DeleteClusterRequest message from the specified reader or buffer, length delimited. + * Decodes a StopClusterRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.DeleteClusterRequest} DeleteClusterRequest + * @returns {google.cloud.dataproc.v1.StopClusterRequest} StopClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteClusterRequest.decodeDelimited = function decodeDelimited(reader) { + StopClusterRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteClusterRequest message. + * Verifies a StopClusterRequest message. 
* @function verify - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteClusterRequest.verify = function verify(message) { + StopClusterRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.projectId != null && message.hasOwnProperty("projectId")) @@ -20466,17 +20523,17 @@ }; /** - * Creates a DeleteClusterRequest message from a plain object. Also converts values to their respective internal types. + * Creates a StopClusterRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.DeleteClusterRequest} DeleteClusterRequest + * @returns {google.cloud.dataproc.v1.StopClusterRequest} StopClusterRequest */ - DeleteClusterRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.DeleteClusterRequest) + StopClusterRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.StopClusterRequest) return object; - var message = new $root.google.cloud.dataproc.v1.DeleteClusterRequest(); + var message = new $root.google.cloud.dataproc.v1.StopClusterRequest(); if (object.projectId != null) message.projectId = String(object.projectId); if (object.region != null) @@ -20491,31 +20548,31 @@ }; /** - * Creates a plain object from a DeleteClusterRequest message. Also converts values to other types if specified. + * Creates a plain object from a StopClusterRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @static - * @param {google.cloud.dataproc.v1.DeleteClusterRequest} message DeleteClusterRequest + * @param {google.cloud.dataproc.v1.StopClusterRequest} message StopClusterRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteClusterRequest.toObject = function toObject(message, options) { + StopClusterRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { object.projectId = ""; - object.clusterName = ""; object.region = ""; + object.clusterName = ""; object.clusterUuid = ""; object.requestId = ""; } if (message.projectId != null && message.hasOwnProperty("projectId")) object.projectId = message.projectId; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - object.clusterName = message.clusterName; if (message.region != null && message.hasOwnProperty("region")) object.region = message.region; + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + object.clusterName = message.clusterName; if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) object.clusterUuid = message.clusterUuid; if (message.requestId != null && message.hasOwnProperty("requestId")) @@ -20524,54 +20581,56 @@ }; /** - * Converts this DeleteClusterRequest to JSON. + * Converts this StopClusterRequest to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @instance * @returns {Object.} JSON object */ - DeleteClusterRequest.prototype.toJSON = function toJSON() { + StopClusterRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteClusterRequest + * Gets the default type url for StopClusterRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.DeleteClusterRequest + * @memberof google.cloud.dataproc.v1.StopClusterRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StopClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.DeleteClusterRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.StopClusterRequest"; }; - return DeleteClusterRequest; + return StopClusterRequest; })(); - v1.GetClusterRequest = (function() { + v1.StartClusterRequest = (function() { /** - * Properties of a GetClusterRequest. + * Properties of a StartClusterRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IGetClusterRequest - * @property {string|null} [projectId] GetClusterRequest projectId - * @property {string|null} [region] GetClusterRequest region - * @property {string|null} [clusterName] GetClusterRequest clusterName + * @interface IStartClusterRequest + * @property {string|null} [projectId] StartClusterRequest projectId + * @property {string|null} [region] StartClusterRequest region + * @property {string|null} [clusterName] StartClusterRequest clusterName + * @property {string|null} [clusterUuid] StartClusterRequest clusterUuid + * @property {string|null} [requestId] StartClusterRequest requestId */ /** - * Constructs a new GetClusterRequest. + * Constructs a new StartClusterRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a GetClusterRequest. - * @implements IGetClusterRequest + * @classdesc Represents a StartClusterRequest. + * @implements IStartClusterRequest * @constructor - * @param {google.cloud.dataproc.v1.IGetClusterRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IStartClusterRequest=} [properties] Properties to set */ - function GetClusterRequest(properties) { + function StartClusterRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -20579,90 +20638,110 @@ } /** - * GetClusterRequest projectId. + * StartClusterRequest projectId. * @member {string} projectId - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @instance */ - GetClusterRequest.prototype.projectId = ""; + StartClusterRequest.prototype.projectId = ""; /** - * GetClusterRequest region. + * StartClusterRequest region. 
* @member {string} region - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @instance */ - GetClusterRequest.prototype.region = ""; + StartClusterRequest.prototype.region = ""; /** - * GetClusterRequest clusterName. + * StartClusterRequest clusterName. * @member {string} clusterName - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @instance */ - GetClusterRequest.prototype.clusterName = ""; + StartClusterRequest.prototype.clusterName = ""; /** - * Creates a new GetClusterRequest instance using the specified properties. + * StartClusterRequest clusterUuid. + * @member {string} clusterUuid + * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @instance + */ + StartClusterRequest.prototype.clusterUuid = ""; + + /** + * StartClusterRequest requestId. + * @member {string} requestId + * @memberof google.cloud.dataproc.v1.StartClusterRequest + * @instance + */ + StartClusterRequest.prototype.requestId = ""; + + /** + * Creates a new StartClusterRequest instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @static - * @param {google.cloud.dataproc.v1.IGetClusterRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.GetClusterRequest} GetClusterRequest instance + * @param {google.cloud.dataproc.v1.IStartClusterRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.StartClusterRequest} StartClusterRequest instance */ - GetClusterRequest.create = function create(properties) { - return new GetClusterRequest(properties); + StartClusterRequest.create = function create(properties) { + return new StartClusterRequest(properties); }; /** - * Encodes the specified GetClusterRequest message. 
Does not implicitly {@link google.cloud.dataproc.v1.GetClusterRequest.verify|verify} messages. + * Encodes the specified StartClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.StartClusterRequest.verify|verify} messages. * @function encode - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @static - * @param {google.cloud.dataproc.v1.IGetClusterRequest} message GetClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IStartClusterRequest} message StartClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetClusterRequest.encode = function encode(message, writer) { + StartClusterRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.clusterName); if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); + writer.uint32(/* id 2, wireType 2 =*/18).string(message.region); + if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.clusterName); + if (message.clusterUuid != null && Object.hasOwnProperty.call(message, "clusterUuid")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.clusterUuid); + if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.requestId); return writer; }; /** - * Encodes the specified GetClusterRequest message, length delimited. 
Does not implicitly {@link google.cloud.dataproc.v1.GetClusterRequest.verify|verify} messages. + * Encodes the specified StartClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.StartClusterRequest.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @static - * @param {google.cloud.dataproc.v1.IGetClusterRequest} message GetClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IStartClusterRequest} message StartClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { + StartClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetClusterRequest message from the specified reader or buffer. + * Decodes a StartClusterRequest message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.GetClusterRequest} GetClusterRequest + * @returns {google.cloud.dataproc.v1.StartClusterRequest} StartClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetClusterRequest.decode = function decode(reader, length) { + StartClusterRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.GetClusterRequest(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.StartClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { @@ -20670,14 +20749,22 @@ message.projectId = reader.string(); break; } - case 3: { + case 2: { message.region = reader.string(); break; } - case 2: { + case 3: { message.clusterName = reader.string(); break; } + case 4: { + message.clusterUuid = reader.string(); + break; + } + case 5: { + message.requestId = reader.string(); + break; + } default: reader.skipType(tag & 7); break; @@ -20687,30 +20774,30 @@ }; /** - * Decodes a GetClusterRequest message from the specified reader or buffer, length delimited. + * Decodes a StartClusterRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.GetClusterRequest} GetClusterRequest + * @returns {google.cloud.dataproc.v1.StartClusterRequest} StartClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetClusterRequest.decodeDelimited = function decodeDelimited(reader) { + StartClusterRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetClusterRequest message. + * Verifies a StartClusterRequest message. 
* @function verify - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetClusterRequest.verify = function verify(message) { + StartClusterRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.projectId != null && message.hasOwnProperty("projectId")) @@ -20722,108 +20809,124 @@ if (message.clusterName != null && message.hasOwnProperty("clusterName")) if (!$util.isString(message.clusterName)) return "clusterName: string expected"; + if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) + if (!$util.isString(message.clusterUuid)) + return "clusterUuid: string expected"; + if (message.requestId != null && message.hasOwnProperty("requestId")) + if (!$util.isString(message.requestId)) + return "requestId: string expected"; return null; }; /** - * Creates a GetClusterRequest message from a plain object. Also converts values to their respective internal types. + * Creates a StartClusterRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.GetClusterRequest} GetClusterRequest + * @returns {google.cloud.dataproc.v1.StartClusterRequest} StartClusterRequest */ - GetClusterRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.GetClusterRequest) + StartClusterRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.StartClusterRequest) return object; - var message = new $root.google.cloud.dataproc.v1.GetClusterRequest(); + var message = new $root.google.cloud.dataproc.v1.StartClusterRequest(); if (object.projectId != null) message.projectId = String(object.projectId); if (object.region != null) message.region = String(object.region); if (object.clusterName != null) message.clusterName = String(object.clusterName); + if (object.clusterUuid != null) + message.clusterUuid = String(object.clusterUuid); + if (object.requestId != null) + message.requestId = String(object.requestId); return message; }; /** - * Creates a plain object from a GetClusterRequest message. Also converts values to other types if specified. + * Creates a plain object from a StartClusterRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @static - * @param {google.cloud.dataproc.v1.GetClusterRequest} message GetClusterRequest + * @param {google.cloud.dataproc.v1.StartClusterRequest} message StartClusterRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetClusterRequest.toObject = function toObject(message, options) { + StartClusterRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { object.projectId = ""; - object.clusterName = ""; object.region = ""; + object.clusterName = ""; + object.clusterUuid = ""; + object.requestId = ""; } if (message.projectId != null && message.hasOwnProperty("projectId")) object.projectId = message.projectId; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - object.clusterName = message.clusterName; if (message.region != null && message.hasOwnProperty("region")) object.region = message.region; + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + object.clusterName = message.clusterName; + if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) + object.clusterUuid = message.clusterUuid; + if (message.requestId != null && message.hasOwnProperty("requestId")) + object.requestId = message.requestId; return object; }; /** - * Converts this GetClusterRequest to JSON. + * Converts this StartClusterRequest to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @instance * @returns {Object.} JSON object */ - GetClusterRequest.prototype.toJSON = function toJSON() { + StartClusterRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetClusterRequest + * Gets the default type url for StartClusterRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @memberof google.cloud.dataproc.v1.StartClusterRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StartClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.GetClusterRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.StartClusterRequest"; }; - return GetClusterRequest; + return StartClusterRequest; })(); - v1.ListClustersRequest = (function() { + v1.DeleteClusterRequest = (function() { /** - * Properties of a ListClustersRequest. + * Properties of a DeleteClusterRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IListClustersRequest - * @property {string|null} [projectId] ListClustersRequest projectId - * @property {string|null} [region] ListClustersRequest region - * @property {string|null} [filter] ListClustersRequest filter - * @property {number|null} [pageSize] ListClustersRequest pageSize - * @property {string|null} [pageToken] ListClustersRequest pageToken + * @interface IDeleteClusterRequest + * @property {string|null} [projectId] DeleteClusterRequest projectId + * @property {string|null} [region] DeleteClusterRequest region + * @property {string|null} [clusterName] DeleteClusterRequest clusterName + * @property {string|null} [clusterUuid] DeleteClusterRequest clusterUuid + * @property {string|null} [requestId] DeleteClusterRequest requestId */ /** - * Constructs a new ListClustersRequest. + * Constructs a new DeleteClusterRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a ListClustersRequest. - * @implements IListClustersRequest + * @classdesc Represents a DeleteClusterRequest. + * @implements IDeleteClusterRequest * @constructor - * @param {google.cloud.dataproc.v1.IListClustersRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IDeleteClusterRequest=} [properties] Properties to set */ - function ListClustersRequest(properties) { + function DeleteClusterRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -20831,110 +20934,110 @@ } /** - * ListClustersRequest projectId. + * DeleteClusterRequest projectId. * @member {string} projectId - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @instance */ - ListClustersRequest.prototype.projectId = ""; + DeleteClusterRequest.prototype.projectId = ""; /** - * ListClustersRequest region. + * DeleteClusterRequest region. 
* @member {string} region - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @instance */ - ListClustersRequest.prototype.region = ""; + DeleteClusterRequest.prototype.region = ""; /** - * ListClustersRequest filter. - * @member {string} filter - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * DeleteClusterRequest clusterName. + * @member {string} clusterName + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @instance */ - ListClustersRequest.prototype.filter = ""; + DeleteClusterRequest.prototype.clusterName = ""; /** - * ListClustersRequest pageSize. - * @member {number} pageSize - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * DeleteClusterRequest clusterUuid. + * @member {string} clusterUuid + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @instance */ - ListClustersRequest.prototype.pageSize = 0; + DeleteClusterRequest.prototype.clusterUuid = ""; /** - * ListClustersRequest pageToken. - * @member {string} pageToken - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * DeleteClusterRequest requestId. + * @member {string} requestId + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @instance */ - ListClustersRequest.prototype.pageToken = ""; + DeleteClusterRequest.prototype.requestId = ""; /** - * Creates a new ListClustersRequest instance using the specified properties. + * Creates a new DeleteClusterRequest instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @static - * @param {google.cloud.dataproc.v1.IListClustersRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.ListClustersRequest} ListClustersRequest instance + * @param {google.cloud.dataproc.v1.IDeleteClusterRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.DeleteClusterRequest} DeleteClusterRequest instance */ - ListClustersRequest.create = function create(properties) { - return new ListClustersRequest(properties); + DeleteClusterRequest.create = function create(properties) { + return new DeleteClusterRequest(properties); }; /** - * Encodes the specified ListClustersRequest message. Does not implicitly {@link google.cloud.dataproc.v1.ListClustersRequest.verify|verify} messages. + * Encodes the specified DeleteClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.DeleteClusterRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @static - * @param {google.cloud.dataproc.v1.IListClustersRequest} message ListClustersRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IDeleteClusterRequest} message DeleteClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ListClustersRequest.encode = function encode(message, writer) { + DeleteClusterRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.pageSize != null && Object.hasOwnProperty.call(message, "pageSize")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.pageSize); - if (message.pageToken != null && Object.hasOwnProperty.call(message, "pageToken")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.pageToken); + if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.clusterName); if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.region); - if (message.filter != null && Object.hasOwnProperty.call(message, "filter")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.filter); + writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); + if (message.clusterUuid != null && Object.hasOwnProperty.call(message, "clusterUuid")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.clusterUuid); + if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.requestId); return writer; }; /** - * Encodes the 
specified ListClustersRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ListClustersRequest.verify|verify} messages. + * Encodes the specified DeleteClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DeleteClusterRequest.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @static - * @param {google.cloud.dataproc.v1.IListClustersRequest} message ListClustersRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IDeleteClusterRequest} message DeleteClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ListClustersRequest.encodeDelimited = function encodeDelimited(message, writer) { + DeleteClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ListClustersRequest message from the specified reader or buffer. + * Decodes a DeleteClusterRequest message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.ListClustersRequest} ListClustersRequest + * @returns {google.cloud.dataproc.v1.DeleteClusterRequest} DeleteClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListClustersRequest.decode = function decode(reader, length) { + DeleteClusterRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ListClustersRequest(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DeleteClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { @@ -20942,20 +21045,20 @@ message.projectId = reader.string(); break; } - case 4: { + case 3: { message.region = reader.string(); break; } - case 5: { - message.filter = reader.string(); + case 2: { + message.clusterName = reader.string(); break; } - case 2: { - message.pageSize = reader.int32(); + case 4: { + message.clusterUuid = reader.string(); break; } - case 3: { - message.pageToken = reader.string(); + case 5: { + message.requestId = reader.string(); break; } default: @@ -20967,30 +21070,30 @@ }; /** - * Decodes a ListClustersRequest message from the specified reader or buffer, length delimited. + * Decodes a DeleteClusterRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.ListClustersRequest} ListClustersRequest + * @returns {google.cloud.dataproc.v1.DeleteClusterRequest} DeleteClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListClustersRequest.decodeDelimited = function decodeDelimited(reader) { + DeleteClusterRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ListClustersRequest message. + * Verifies a DeleteClusterRequest message. * @function verify - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ListClustersRequest.verify = function verify(message) { + DeleteClusterRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.projectId != null && message.hasOwnProperty("projectId")) @@ -20999,125 +21102,125 @@ if (message.region != null && message.hasOwnProperty("region")) if (!$util.isString(message.region)) return "region: string expected"; - if (message.filter != null && message.hasOwnProperty("filter")) - if (!$util.isString(message.filter)) - return "filter: string expected"; - if (message.pageSize != null && message.hasOwnProperty("pageSize")) - if (!$util.isInteger(message.pageSize)) - return "pageSize: integer expected"; - if (message.pageToken != null && message.hasOwnProperty("pageToken")) - if 
(!$util.isString(message.pageToken)) - return "pageToken: string expected"; + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + if (!$util.isString(message.clusterName)) + return "clusterName: string expected"; + if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) + if (!$util.isString(message.clusterUuid)) + return "clusterUuid: string expected"; + if (message.requestId != null && message.hasOwnProperty("requestId")) + if (!$util.isString(message.requestId)) + return "requestId: string expected"; return null; }; /** - * Creates a ListClustersRequest message from a plain object. Also converts values to their respective internal types. + * Creates a DeleteClusterRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.ListClustersRequest} ListClustersRequest + * @returns {google.cloud.dataproc.v1.DeleteClusterRequest} DeleteClusterRequest */ - ListClustersRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.ListClustersRequest) + DeleteClusterRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.DeleteClusterRequest) return object; - var message = new $root.google.cloud.dataproc.v1.ListClustersRequest(); + var message = new $root.google.cloud.dataproc.v1.DeleteClusterRequest(); if (object.projectId != null) message.projectId = String(object.projectId); if (object.region != null) message.region = String(object.region); - if (object.filter != null) - message.filter = String(object.filter); - if (object.pageSize != null) - message.pageSize = object.pageSize | 0; - if (object.pageToken != null) - message.pageToken = String(object.pageToken); + if 
(object.clusterName != null) + message.clusterName = String(object.clusterName); + if (object.clusterUuid != null) + message.clusterUuid = String(object.clusterUuid); + if (object.requestId != null) + message.requestId = String(object.requestId); return message; }; /** - * Creates a plain object from a ListClustersRequest message. Also converts values to other types if specified. + * Creates a plain object from a DeleteClusterRequest message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @static - * @param {google.cloud.dataproc.v1.ListClustersRequest} message ListClustersRequest + * @param {google.cloud.dataproc.v1.DeleteClusterRequest} message DeleteClusterRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ListClustersRequest.toObject = function toObject(message, options) { + DeleteClusterRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { object.projectId = ""; - object.pageSize = 0; - object.pageToken = ""; + object.clusterName = ""; object.region = ""; - object.filter = ""; + object.clusterUuid = ""; + object.requestId = ""; } if (message.projectId != null && message.hasOwnProperty("projectId")) object.projectId = message.projectId; - if (message.pageSize != null && message.hasOwnProperty("pageSize")) - object.pageSize = message.pageSize; - if (message.pageToken != null && message.hasOwnProperty("pageToken")) - object.pageToken = message.pageToken; + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + object.clusterName = message.clusterName; if (message.region != null && message.hasOwnProperty("region")) object.region = message.region; - if (message.filter != null && message.hasOwnProperty("filter")) - object.filter = message.filter; + if 
(message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) + object.clusterUuid = message.clusterUuid; + if (message.requestId != null && message.hasOwnProperty("requestId")) + object.requestId = message.requestId; return object; }; /** - * Converts this ListClustersRequest to JSON. + * Converts this DeleteClusterRequest to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @instance * @returns {Object.} JSON object */ - ListClustersRequest.prototype.toJSON = function toJSON() { + DeleteClusterRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ListClustersRequest + * Gets the default type url for DeleteClusterRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @memberof google.cloud.dataproc.v1.DeleteClusterRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ListClustersRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + DeleteClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.ListClustersRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.DeleteClusterRequest"; }; - return ListClustersRequest; + return DeleteClusterRequest; })(); - v1.ListClustersResponse = (function() { + v1.GetClusterRequest = (function() { /** - * Properties of a ListClustersResponse. + * Properties of a GetClusterRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IListClustersResponse - * @property {Array.|null} [clusters] ListClustersResponse clusters - * @property {string|null} [nextPageToken] ListClustersResponse nextPageToken + * @interface IGetClusterRequest + * @property {string|null} [projectId] GetClusterRequest projectId + * @property {string|null} [region] GetClusterRequest region + * @property {string|null} [clusterName] GetClusterRequest clusterName */ /** - * Constructs a new ListClustersResponse. + * Constructs a new GetClusterRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a ListClustersResponse. - * @implements IListClustersResponse + * @classdesc Represents a GetClusterRequest. + * @implements IGetClusterRequest * @constructor - * @param {google.cloud.dataproc.v1.IListClustersResponse=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IGetClusterRequest=} [properties] Properties to set */ - function ListClustersResponse(properties) { - this.clusters = []; + function GetClusterRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -21125,92 +21228,103 @@ } /** - * ListClustersResponse clusters. - * @member {Array.} clusters - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * GetClusterRequest projectId. + * @member {string} projectId + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @instance */ - ListClustersResponse.prototype.clusters = $util.emptyArray; + GetClusterRequest.prototype.projectId = ""; /** - * ListClustersResponse nextPageToken. - * @member {string} nextPageToken - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * GetClusterRequest region. 
+ * @member {string} region + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @instance */ - ListClustersResponse.prototype.nextPageToken = ""; + GetClusterRequest.prototype.region = ""; /** - * Creates a new ListClustersResponse instance using the specified properties. + * GetClusterRequest clusterName. + * @member {string} clusterName + * @memberof google.cloud.dataproc.v1.GetClusterRequest + * @instance + */ + GetClusterRequest.prototype.clusterName = ""; + + /** + * Creates a new GetClusterRequest instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @static - * @param {google.cloud.dataproc.v1.IListClustersResponse=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.ListClustersResponse} ListClustersResponse instance + * @param {google.cloud.dataproc.v1.IGetClusterRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.GetClusterRequest} GetClusterRequest instance */ - ListClustersResponse.create = function create(properties) { - return new ListClustersResponse(properties); + GetClusterRequest.create = function create(properties) { + return new GetClusterRequest(properties); }; /** - * Encodes the specified ListClustersResponse message. Does not implicitly {@link google.cloud.dataproc.v1.ListClustersResponse.verify|verify} messages. + * Encodes the specified GetClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.GetClusterRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @static - * @param {google.cloud.dataproc.v1.IListClustersResponse} message ListClustersResponse message or plain object to encode + * @param {google.cloud.dataproc.v1.IGetClusterRequest} message GetClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ListClustersResponse.encode = function encode(message, writer) { + GetClusterRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.clusters != null && message.clusters.length) - for (var i = 0; i < message.clusters.length; ++i) - $root.google.cloud.dataproc.v1.Cluster.encode(message.clusters[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.nextPageToken != null && Object.hasOwnProperty.call(message, "nextPageToken")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.nextPageToken); + if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); + if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.clusterName); + if (message.region != null && Object.hasOwnProperty.call(message, "region")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); return writer; }; /** - * Encodes the specified ListClustersResponse message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ListClustersResponse.verify|verify} messages. + * Encodes the specified GetClusterRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.GetClusterRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @static - * @param {google.cloud.dataproc.v1.IListClustersResponse} message ListClustersResponse message or plain object to encode + * @param {google.cloud.dataproc.v1.IGetClusterRequest} message GetClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ListClustersResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ListClustersResponse message from the specified reader or buffer. + * Decodes a GetClusterRequest message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.ListClustersResponse} ListClustersResponse + * @returns {google.cloud.dataproc.v1.GetClusterRequest} GetClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListClustersResponse.decode = function decode(reader, length) { + GetClusterRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ListClustersResponse(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.GetClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.clusters && message.clusters.length)) - message.clusters = []; - message.clusters.push($root.google.cloud.dataproc.v1.Cluster.decode(reader, reader.uint32())); + message.projectId = reader.string(); + break; + } + case 3: { + message.region = reader.string(); break; } case 2: { - message.nextPageToken = reader.string(); + message.clusterName = reader.string(); break; } default: @@ -21222,150 +21336,143 @@ }; /** - * Decodes a ListClustersResponse message from the specified reader or buffer, length delimited. + * Decodes a GetClusterRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.ListClustersResponse} ListClustersResponse + * @returns {google.cloud.dataproc.v1.GetClusterRequest} GetClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListClustersResponse.decodeDelimited = function decodeDelimited(reader) { + GetClusterRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ListClustersResponse message. + * Verifies a GetClusterRequest message. 
* @function verify - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ListClustersResponse.verify = function verify(message) { + GetClusterRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.clusters != null && message.hasOwnProperty("clusters")) { - if (!Array.isArray(message.clusters)) - return "clusters: array expected"; - for (var i = 0; i < message.clusters.length; ++i) { - var error = $root.google.cloud.dataproc.v1.Cluster.verify(message.clusters[i]); - if (error) - return "clusters." + error; - } - } - if (message.nextPageToken != null && message.hasOwnProperty("nextPageToken")) - if (!$util.isString(message.nextPageToken)) - return "nextPageToken: string expected"; + if (message.projectId != null && message.hasOwnProperty("projectId")) + if (!$util.isString(message.projectId)) + return "projectId: string expected"; + if (message.region != null && message.hasOwnProperty("region")) + if (!$util.isString(message.region)) + return "region: string expected"; + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + if (!$util.isString(message.clusterName)) + return "clusterName: string expected"; return null; }; /** - * Creates a ListClustersResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetClusterRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.ListClustersResponse} ListClustersResponse + * @returns {google.cloud.dataproc.v1.GetClusterRequest} GetClusterRequest */ - ListClustersResponse.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.ListClustersResponse) + GetClusterRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.GetClusterRequest) return object; - var message = new $root.google.cloud.dataproc.v1.ListClustersResponse(); - if (object.clusters) { - if (!Array.isArray(object.clusters)) - throw TypeError(".google.cloud.dataproc.v1.ListClustersResponse.clusters: array expected"); - message.clusters = []; - for (var i = 0; i < object.clusters.length; ++i) { - if (typeof object.clusters[i] !== "object") - throw TypeError(".google.cloud.dataproc.v1.ListClustersResponse.clusters: object expected"); - message.clusters[i] = $root.google.cloud.dataproc.v1.Cluster.fromObject(object.clusters[i]); - } - } - if (object.nextPageToken != null) - message.nextPageToken = String(object.nextPageToken); + var message = new $root.google.cloud.dataproc.v1.GetClusterRequest(); + if (object.projectId != null) + message.projectId = String(object.projectId); + if (object.region != null) + message.region = String(object.region); + if (object.clusterName != null) + message.clusterName = String(object.clusterName); return message; }; /** - * Creates a plain object from a ListClustersResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetClusterRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @static - * @param {google.cloud.dataproc.v1.ListClustersResponse} message ListClustersResponse + * @param {google.cloud.dataproc.v1.GetClusterRequest} message GetClusterRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ListClustersResponse.toObject = function toObject(message, options) { + GetClusterRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) - object.clusters = []; - if (options.defaults) - object.nextPageToken = ""; - if (message.clusters && message.clusters.length) { - object.clusters = []; - for (var j = 0; j < message.clusters.length; ++j) - object.clusters[j] = $root.google.cloud.dataproc.v1.Cluster.toObject(message.clusters[j], options); + if (options.defaults) { + object.projectId = ""; + object.clusterName = ""; + object.region = ""; } - if (message.nextPageToken != null && message.hasOwnProperty("nextPageToken")) - object.nextPageToken = message.nextPageToken; + if (message.projectId != null && message.hasOwnProperty("projectId")) + object.projectId = message.projectId; + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + object.clusterName = message.clusterName; + if (message.region != null && message.hasOwnProperty("region")) + object.region = message.region; return object; }; /** - * Converts this ListClustersResponse to JSON. + * Converts this GetClusterRequest to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @instance * @returns {Object.} JSON object */ - ListClustersResponse.prototype.toJSON = function toJSON() { + GetClusterRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ListClustersResponse + * Gets the default type url for GetClusterRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @memberof google.cloud.dataproc.v1.GetClusterRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ListClustersResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.ListClustersResponse"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.GetClusterRequest"; }; - return ListClustersResponse; + return GetClusterRequest; })(); - v1.DiagnoseClusterRequest = (function() { + v1.ListClustersRequest = (function() { /** - * Properties of a DiagnoseClusterRequest. + * Properties of a ListClustersRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IDiagnoseClusterRequest - * @property {string|null} [projectId] DiagnoseClusterRequest projectId - * @property {string|null} [region] DiagnoseClusterRequest region - * @property {string|null} [clusterName] DiagnoseClusterRequest clusterName + * @interface IListClustersRequest + * @property {string|null} [projectId] ListClustersRequest projectId + * @property {string|null} [region] ListClustersRequest region + * @property {string|null} [filter] ListClustersRequest filter + * @property {number|null} [pageSize] ListClustersRequest pageSize + * @property {string|null} [pageToken] ListClustersRequest pageToken */ /** - * Constructs a new DiagnoseClusterRequest. + * Constructs a new ListClustersRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a DiagnoseClusterRequest. - * @implements IDiagnoseClusterRequest + * @classdesc Represents a ListClustersRequest. + * @implements IListClustersRequest * @constructor - * @param {google.cloud.dataproc.v1.IDiagnoseClusterRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IListClustersRequest=} [properties] Properties to set */ - function DiagnoseClusterRequest(properties) { + function ListClustersRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -21373,90 +21480,110 @@ } /** - * DiagnoseClusterRequest projectId. + * ListClustersRequest projectId. * @member {string} projectId - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @instance */ - DiagnoseClusterRequest.prototype.projectId = ""; + ListClustersRequest.prototype.projectId = ""; /** - * DiagnoseClusterRequest region. + * ListClustersRequest region. 
* @member {string} region - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @instance */ - DiagnoseClusterRequest.prototype.region = ""; + ListClustersRequest.prototype.region = ""; /** - * DiagnoseClusterRequest clusterName. - * @member {string} clusterName - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * ListClustersRequest filter. + * @member {string} filter + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @instance */ - DiagnoseClusterRequest.prototype.clusterName = ""; + ListClustersRequest.prototype.filter = ""; /** - * Creates a new DiagnoseClusterRequest instance using the specified properties. + * ListClustersRequest pageSize. + * @member {number} pageSize + * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @instance + */ + ListClustersRequest.prototype.pageSize = 0; + + /** + * ListClustersRequest pageToken. + * @member {string} pageToken + * @memberof google.cloud.dataproc.v1.ListClustersRequest + * @instance + */ + ListClustersRequest.prototype.pageToken = ""; + + /** + * Creates a new ListClustersRequest instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @static - * @param {google.cloud.dataproc.v1.IDiagnoseClusterRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.DiagnoseClusterRequest} DiagnoseClusterRequest instance + * @param {google.cloud.dataproc.v1.IListClustersRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.ListClustersRequest} ListClustersRequest instance */ - DiagnoseClusterRequest.create = function create(properties) { - return new DiagnoseClusterRequest(properties); + ListClustersRequest.create = function create(properties) { + return new ListClustersRequest(properties); }; /** - * Encodes the specified DiagnoseClusterRequest message. 
Does not implicitly {@link google.cloud.dataproc.v1.DiagnoseClusterRequest.verify|verify} messages. + * Encodes the specified ListClustersRequest message. Does not implicitly {@link google.cloud.dataproc.v1.ListClustersRequest.verify|verify} messages. * @function encode - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @static - * @param {google.cloud.dataproc.v1.IDiagnoseClusterRequest} message DiagnoseClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IListClustersRequest} message ListClustersRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DiagnoseClusterRequest.encode = function encode(message, writer) { + ListClustersRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.clusterName); + if (message.pageSize != null && Object.hasOwnProperty.call(message, "pageSize")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.pageSize); + if (message.pageToken != null && Object.hasOwnProperty.call(message, "pageToken")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.pageToken); if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); + writer.uint32(/* id 4, wireType 2 =*/34).string(message.region); + if (message.filter != null && Object.hasOwnProperty.call(message, "filter")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.filter); return writer; }; /** - * Encodes the specified DiagnoseClusterRequest message, length delimited. 
Does not implicitly {@link google.cloud.dataproc.v1.DiagnoseClusterRequest.verify|verify} messages. + * Encodes the specified ListClustersRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ListClustersRequest.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @static - * @param {google.cloud.dataproc.v1.IDiagnoseClusterRequest} message DiagnoseClusterRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IListClustersRequest} message ListClustersRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DiagnoseClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { + ListClustersRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DiagnoseClusterRequest message from the specified reader or buffer. + * Decodes a ListClustersRequest message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.DiagnoseClusterRequest} DiagnoseClusterRequest + * @returns {google.cloud.dataproc.v1.ListClustersRequest} ListClustersRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DiagnoseClusterRequest.decode = function decode(reader, length) { + ListClustersRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DiagnoseClusterRequest(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ListClustersRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { @@ -21464,12 +21591,20 @@ message.projectId = reader.string(); break; } - case 3: { + case 4: { message.region = reader.string(); break; } + case 5: { + message.filter = reader.string(); + break; + } case 2: { - message.clusterName = reader.string(); + message.pageSize = reader.int32(); + break; + } + case 3: { + message.pageToken = reader.string(); break; } default: @@ -21481,30 +21616,30 @@ }; /** - * Decodes a DiagnoseClusterRequest message from the specified reader or buffer, length delimited. + * Decodes a ListClustersRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.DiagnoseClusterRequest} DiagnoseClusterRequest + * @returns {google.cloud.dataproc.v1.ListClustersRequest} ListClustersRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DiagnoseClusterRequest.decodeDelimited = function decodeDelimited(reader) { + ListClustersRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DiagnoseClusterRequest message. + * Verifies a ListClustersRequest message. 
* @function verify - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DiagnoseClusterRequest.verify = function verify(message) { + ListClustersRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.projectId != null && message.hasOwnProperty("projectId")) @@ -21513,107 +21648,125 @@ if (message.region != null && message.hasOwnProperty("region")) if (!$util.isString(message.region)) return "region: string expected"; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - if (!$util.isString(message.clusterName)) - return "clusterName: string expected"; + if (message.filter != null && message.hasOwnProperty("filter")) + if (!$util.isString(message.filter)) + return "filter: string expected"; + if (message.pageSize != null && message.hasOwnProperty("pageSize")) + if (!$util.isInteger(message.pageSize)) + return "pageSize: integer expected"; + if (message.pageToken != null && message.hasOwnProperty("pageToken")) + if (!$util.isString(message.pageToken)) + return "pageToken: string expected"; return null; }; /** - * Creates a DiagnoseClusterRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ListClustersRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.DiagnoseClusterRequest} DiagnoseClusterRequest + * @returns {google.cloud.dataproc.v1.ListClustersRequest} ListClustersRequest */ - DiagnoseClusterRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.DiagnoseClusterRequest) + ListClustersRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.ListClustersRequest) return object; - var message = new $root.google.cloud.dataproc.v1.DiagnoseClusterRequest(); + var message = new $root.google.cloud.dataproc.v1.ListClustersRequest(); if (object.projectId != null) message.projectId = String(object.projectId); if (object.region != null) message.region = String(object.region); - if (object.clusterName != null) - message.clusterName = String(object.clusterName); + if (object.filter != null) + message.filter = String(object.filter); + if (object.pageSize != null) + message.pageSize = object.pageSize | 0; + if (object.pageToken != null) + message.pageToken = String(object.pageToken); return message; }; /** - * Creates a plain object from a DiagnoseClusterRequest message. Also converts values to other types if specified. + * Creates a plain object from a ListClustersRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @static - * @param {google.cloud.dataproc.v1.DiagnoseClusterRequest} message DiagnoseClusterRequest + * @param {google.cloud.dataproc.v1.ListClustersRequest} message ListClustersRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DiagnoseClusterRequest.toObject = function toObject(message, options) { + ListClustersRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { object.projectId = ""; - object.clusterName = ""; + object.pageSize = 0; + object.pageToken = ""; object.region = ""; + object.filter = ""; } if (message.projectId != null && message.hasOwnProperty("projectId")) object.projectId = message.projectId; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - object.clusterName = message.clusterName; + if (message.pageSize != null && message.hasOwnProperty("pageSize")) + object.pageSize = message.pageSize; + if (message.pageToken != null && message.hasOwnProperty("pageToken")) + object.pageToken = message.pageToken; if (message.region != null && message.hasOwnProperty("region")) object.region = message.region; + if (message.filter != null && message.hasOwnProperty("filter")) + object.filter = message.filter; return object; }; /** - * Converts this DiagnoseClusterRequest to JSON. + * Converts this ListClustersRequest to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @instance * @returns {Object.} JSON object */ - DiagnoseClusterRequest.prototype.toJSON = function toJSON() { + ListClustersRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DiagnoseClusterRequest + * Gets the default type url for ListClustersRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest + * @memberof google.cloud.dataproc.v1.ListClustersRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DiagnoseClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ListClustersRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.DiagnoseClusterRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.ListClustersRequest"; }; - return DiagnoseClusterRequest; + return ListClustersRequest; })(); - v1.DiagnoseClusterResults = (function() { + v1.ListClustersResponse = (function() { /** - * Properties of a DiagnoseClusterResults. + * Properties of a ListClustersResponse. * @memberof google.cloud.dataproc.v1 - * @interface IDiagnoseClusterResults - * @property {string|null} [outputUri] DiagnoseClusterResults outputUri + * @interface IListClustersResponse + * @property {Array.|null} [clusters] ListClustersResponse clusters + * @property {string|null} [nextPageToken] ListClustersResponse nextPageToken */ /** - * Constructs a new DiagnoseClusterResults. + * Constructs a new ListClustersResponse. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a DiagnoseClusterResults. 
- * @implements IDiagnoseClusterResults + * @classdesc Represents a ListClustersResponse. + * @implements IListClustersResponse * @constructor - * @param {google.cloud.dataproc.v1.IDiagnoseClusterResults=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IListClustersResponse=} [properties] Properties to set */ - function DiagnoseClusterResults(properties) { + function ListClustersResponse(properties) { + this.clusters = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -21621,75 +21774,92 @@ } /** - * DiagnoseClusterResults outputUri. - * @member {string} outputUri - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * ListClustersResponse clusters. + * @member {Array.} clusters + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @instance */ - DiagnoseClusterResults.prototype.outputUri = ""; + ListClustersResponse.prototype.clusters = $util.emptyArray; /** - * Creates a new DiagnoseClusterResults instance using the specified properties. + * ListClustersResponse nextPageToken. + * @member {string} nextPageToken + * @memberof google.cloud.dataproc.v1.ListClustersResponse + * @instance + */ + ListClustersResponse.prototype.nextPageToken = ""; + + /** + * Creates a new ListClustersResponse instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @static - * @param {google.cloud.dataproc.v1.IDiagnoseClusterResults=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.DiagnoseClusterResults} DiagnoseClusterResults instance + * @param {google.cloud.dataproc.v1.IListClustersResponse=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.ListClustersResponse} ListClustersResponse instance */ - DiagnoseClusterResults.create = function create(properties) { - return new DiagnoseClusterResults(properties); + ListClustersResponse.create = function create(properties) { + return new ListClustersResponse(properties); }; /** - * Encodes the specified DiagnoseClusterResults message. Does not implicitly {@link google.cloud.dataproc.v1.DiagnoseClusterResults.verify|verify} messages. + * Encodes the specified ListClustersResponse message. Does not implicitly {@link google.cloud.dataproc.v1.ListClustersResponse.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @static - * @param {google.cloud.dataproc.v1.IDiagnoseClusterResults} message DiagnoseClusterResults message or plain object to encode + * @param {google.cloud.dataproc.v1.IListClustersResponse} message ListClustersResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DiagnoseClusterResults.encode = function encode(message, writer) { + ListClustersResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.outputUri != null && Object.hasOwnProperty.call(message, "outputUri")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.outputUri); + if (message.clusters != null && message.clusters.length) + for (var i = 0; i < message.clusters.length; ++i) + $root.google.cloud.dataproc.v1.Cluster.encode(message.clusters[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.nextPageToken != null && Object.hasOwnProperty.call(message, "nextPageToken")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.nextPageToken); return writer; }; /** - * Encodes the specified DiagnoseClusterResults message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DiagnoseClusterResults.verify|verify} messages. + * Encodes the specified ListClustersResponse message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ListClustersResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @static - * @param {google.cloud.dataproc.v1.IDiagnoseClusterResults} message DiagnoseClusterResults message or plain object to encode + * @param {google.cloud.dataproc.v1.IListClustersResponse} message ListClustersResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DiagnoseClusterResults.encodeDelimited = function encodeDelimited(message, writer) { + ListClustersResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DiagnoseClusterResults message from the specified reader or buffer. + * Decodes a ListClustersResponse message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.DiagnoseClusterResults} DiagnoseClusterResults + * @returns {google.cloud.dataproc.v1.ListClustersResponse} ListClustersResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DiagnoseClusterResults.decode = function decode(reader, length) { + ListClustersResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DiagnoseClusterResults(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ListClustersResponse(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.outputUri = reader.string(); + if (!(message.clusters && message.clusters.length)) + message.clusters = []; + message.clusters.push($root.google.cloud.dataproc.v1.Cluster.decode(reader, reader.uint32())); + break; + } + case 2: { + message.nextPageToken = reader.string(); break; } default: @@ -21701,125 +21871,150 @@ }; /** - * Decodes a DiagnoseClusterResults message from the specified reader or buffer, length delimited. + * Decodes a ListClustersResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.DiagnoseClusterResults} DiagnoseClusterResults + * @returns {google.cloud.dataproc.v1.ListClustersResponse} ListClustersResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DiagnoseClusterResults.decodeDelimited = function decodeDelimited(reader) { + ListClustersResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DiagnoseClusterResults message. + * Verifies a ListClustersResponse message. 
* @function verify - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DiagnoseClusterResults.verify = function verify(message) { + ListClustersResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.outputUri != null && message.hasOwnProperty("outputUri")) - if (!$util.isString(message.outputUri)) - return "outputUri: string expected"; + if (message.clusters != null && message.hasOwnProperty("clusters")) { + if (!Array.isArray(message.clusters)) + return "clusters: array expected"; + for (var i = 0; i < message.clusters.length; ++i) { + var error = $root.google.cloud.dataproc.v1.Cluster.verify(message.clusters[i]); + if (error) + return "clusters." + error; + } + } + if (message.nextPageToken != null && message.hasOwnProperty("nextPageToken")) + if (!$util.isString(message.nextPageToken)) + return "nextPageToken: string expected"; return null; }; /** - * Creates a DiagnoseClusterResults message from a plain object. Also converts values to their respective internal types. + * Creates a ListClustersResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.DiagnoseClusterResults} DiagnoseClusterResults + * @returns {google.cloud.dataproc.v1.ListClustersResponse} ListClustersResponse */ - DiagnoseClusterResults.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.DiagnoseClusterResults) + ListClustersResponse.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.ListClustersResponse) return object; - var message = new $root.google.cloud.dataproc.v1.DiagnoseClusterResults(); - if (object.outputUri != null) - message.outputUri = String(object.outputUri); + var message = new $root.google.cloud.dataproc.v1.ListClustersResponse(); + if (object.clusters) { + if (!Array.isArray(object.clusters)) + throw TypeError(".google.cloud.dataproc.v1.ListClustersResponse.clusters: array expected"); + message.clusters = []; + for (var i = 0; i < object.clusters.length; ++i) { + if (typeof object.clusters[i] !== "object") + throw TypeError(".google.cloud.dataproc.v1.ListClustersResponse.clusters: object expected"); + message.clusters[i] = $root.google.cloud.dataproc.v1.Cluster.fromObject(object.clusters[i]); + } + } + if (object.nextPageToken != null) + message.nextPageToken = String(object.nextPageToken); return message; }; /** - * Creates a plain object from a DiagnoseClusterResults message. Also converts values to other types if specified. + * Creates a plain object from a ListClustersResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @static - * @param {google.cloud.dataproc.v1.DiagnoseClusterResults} message DiagnoseClusterResults + * @param {google.cloud.dataproc.v1.ListClustersResponse} message ListClustersResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DiagnoseClusterResults.toObject = function toObject(message, options) { + ListClustersResponse.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; + if (options.arrays || options.defaults) + object.clusters = []; if (options.defaults) - object.outputUri = ""; - if (message.outputUri != null && message.hasOwnProperty("outputUri")) - object.outputUri = message.outputUri; + object.nextPageToken = ""; + if (message.clusters && message.clusters.length) { + object.clusters = []; + for (var j = 0; j < message.clusters.length; ++j) + object.clusters[j] = $root.google.cloud.dataproc.v1.Cluster.toObject(message.clusters[j], options); + } + if (message.nextPageToken != null && message.hasOwnProperty("nextPageToken")) + object.nextPageToken = message.nextPageToken; return object; }; /** - * Converts this DiagnoseClusterResults to JSON. + * Converts this ListClustersResponse to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @instance * @returns {Object.} JSON object */ - DiagnoseClusterResults.prototype.toJSON = function toJSON() { + ListClustersResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DiagnoseClusterResults + * Gets the default type url for ListClustersResponse * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @memberof google.cloud.dataproc.v1.ListClustersResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DiagnoseClusterResults.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ListClustersResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.DiagnoseClusterResults"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.ListClustersResponse"; }; - return DiagnoseClusterResults; + return ListClustersResponse; })(); - v1.ReservationAffinity = (function() { + v1.DiagnoseClusterRequest = (function() { /** - * Properties of a ReservationAffinity. + * Properties of a DiagnoseClusterRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IReservationAffinity - * @property {google.cloud.dataproc.v1.ReservationAffinity.Type|null} [consumeReservationType] ReservationAffinity consumeReservationType - * @property {string|null} [key] ReservationAffinity key - * @property {Array.|null} [values] ReservationAffinity values + * @interface IDiagnoseClusterRequest + * @property {string|null} [projectId] DiagnoseClusterRequest projectId + * @property {string|null} [region] DiagnoseClusterRequest region + * @property {string|null} [clusterName] DiagnoseClusterRequest clusterName */ /** - * Constructs a new ReservationAffinity. + * Constructs a new DiagnoseClusterRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a ReservationAffinity. - * @implements IReservationAffinity + * @classdesc Represents a DiagnoseClusterRequest. + * @implements IDiagnoseClusterRequest * @constructor - * @param {google.cloud.dataproc.v1.IReservationAffinity=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IDiagnoseClusterRequest=} [properties] Properties to set */ - function ReservationAffinity(properties) { - this.values = []; + function DiagnoseClusterRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -21827,106 +22022,103 @@ } /** - * ReservationAffinity consumeReservationType. - * @member {google.cloud.dataproc.v1.ReservationAffinity.Type} consumeReservationType - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * DiagnoseClusterRequest projectId. + * @member {string} projectId + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @instance */ - ReservationAffinity.prototype.consumeReservationType = 0; + DiagnoseClusterRequest.prototype.projectId = ""; /** - * ReservationAffinity key. - * @member {string} key - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * DiagnoseClusterRequest region. 
+ * @member {string} region + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @instance */ - ReservationAffinity.prototype.key = ""; + DiagnoseClusterRequest.prototype.region = ""; /** - * ReservationAffinity values. - * @member {Array.} values - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * DiagnoseClusterRequest clusterName. + * @member {string} clusterName + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @instance */ - ReservationAffinity.prototype.values = $util.emptyArray; + DiagnoseClusterRequest.prototype.clusterName = ""; /** - * Creates a new ReservationAffinity instance using the specified properties. + * Creates a new DiagnoseClusterRequest instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @static - * @param {google.cloud.dataproc.v1.IReservationAffinity=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.ReservationAffinity} ReservationAffinity instance + * @param {google.cloud.dataproc.v1.IDiagnoseClusterRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.DiagnoseClusterRequest} DiagnoseClusterRequest instance */ - ReservationAffinity.create = function create(properties) { - return new ReservationAffinity(properties); + DiagnoseClusterRequest.create = function create(properties) { + return new DiagnoseClusterRequest(properties); }; /** - * Encodes the specified ReservationAffinity message. Does not implicitly {@link google.cloud.dataproc.v1.ReservationAffinity.verify|verify} messages. + * Encodes the specified DiagnoseClusterRequest message. Does not implicitly {@link google.cloud.dataproc.v1.DiagnoseClusterRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @static - * @param {google.cloud.dataproc.v1.IReservationAffinity} message ReservationAffinity message or plain object to encode + * @param {google.cloud.dataproc.v1.IDiagnoseClusterRequest} message DiagnoseClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReservationAffinity.encode = function encode(message, writer) { + DiagnoseClusterRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.consumeReservationType != null && Object.hasOwnProperty.call(message, "consumeReservationType")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.consumeReservationType); - if (message.key != null && Object.hasOwnProperty.call(message, "key")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.key); - if (message.values != null && message.values.length) - for (var i = 0; i < message.values.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.values[i]); + if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); + if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.clusterName); + if (message.region != null && Object.hasOwnProperty.call(message, "region")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); return writer; }; /** - * Encodes the specified ReservationAffinity message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ReservationAffinity.verify|verify} messages. + * Encodes the specified DiagnoseClusterRequest message, length delimited. 
Does not implicitly {@link google.cloud.dataproc.v1.DiagnoseClusterRequest.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @static - * @param {google.cloud.dataproc.v1.IReservationAffinity} message ReservationAffinity message or plain object to encode + * @param {google.cloud.dataproc.v1.IDiagnoseClusterRequest} message DiagnoseClusterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReservationAffinity.encodeDelimited = function encodeDelimited(message, writer) { + DiagnoseClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReservationAffinity message from the specified reader or buffer. + * Decodes a DiagnoseClusterRequest message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.ReservationAffinity} ReservationAffinity + * @returns {google.cloud.dataproc.v1.DiagnoseClusterRequest} DiagnoseClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReservationAffinity.decode = function decode(reader, length) { + DiagnoseClusterRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ReservationAffinity(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DiagnoseClusterRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.consumeReservationType = reader.int32(); + message.projectId = reader.string(); break; } - case 2: { - message.key = reader.string(); + case 3: { + message.region = reader.string(); break; } - case 3: { - if (!(message.values && message.values.length)) - message.values = []; - message.values.push(reader.string()); + case 2: { + message.clusterName = reader.string(); break; } default: @@ -21938,466 +22130,345 @@ }; /** - * Decodes a ReservationAffinity message from the specified reader or buffer, length delimited. + * Decodes a DiagnoseClusterRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.ReservationAffinity} ReservationAffinity + * @returns {google.cloud.dataproc.v1.DiagnoseClusterRequest} DiagnoseClusterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReservationAffinity.decodeDelimited = function decodeDelimited(reader) { + DiagnoseClusterRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReservationAffinity message. + * Verifies a DiagnoseClusterRequest message. 
* @function verify - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReservationAffinity.verify = function verify(message) { + DiagnoseClusterRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.consumeReservationType != null && message.hasOwnProperty("consumeReservationType")) - switch (message.consumeReservationType) { - default: - return "consumeReservationType: enum value expected"; - case 0: - case 1: - case 2: - case 3: - break; - } - if (message.key != null && message.hasOwnProperty("key")) - if (!$util.isString(message.key)) - return "key: string expected"; - if (message.values != null && message.hasOwnProperty("values")) { - if (!Array.isArray(message.values)) - return "values: array expected"; - for (var i = 0; i < message.values.length; ++i) - if (!$util.isString(message.values[i])) - return "values: string[] expected"; - } + if (message.projectId != null && message.hasOwnProperty("projectId")) + if (!$util.isString(message.projectId)) + return "projectId: string expected"; + if (message.region != null && message.hasOwnProperty("region")) + if (!$util.isString(message.region)) + return "region: string expected"; + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + if (!$util.isString(message.clusterName)) + return "clusterName: string expected"; return null; }; /** - * Creates a ReservationAffinity message from a plain object. Also converts values to their respective internal types. + * Creates a DiagnoseClusterRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.ReservationAffinity} ReservationAffinity + * @returns {google.cloud.dataproc.v1.DiagnoseClusterRequest} DiagnoseClusterRequest */ - ReservationAffinity.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.ReservationAffinity) + DiagnoseClusterRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.DiagnoseClusterRequest) return object; - var message = new $root.google.cloud.dataproc.v1.ReservationAffinity(); - switch (object.consumeReservationType) { - default: - if (typeof object.consumeReservationType === "number") { - message.consumeReservationType = object.consumeReservationType; - break; - } - break; - case "TYPE_UNSPECIFIED": - case 0: - message.consumeReservationType = 0; - break; - case "NO_RESERVATION": - case 1: - message.consumeReservationType = 1; - break; - case "ANY_RESERVATION": - case 2: - message.consumeReservationType = 2; - break; - case "SPECIFIC_RESERVATION": - case 3: - message.consumeReservationType = 3; - break; - } - if (object.key != null) - message.key = String(object.key); - if (object.values) { - if (!Array.isArray(object.values)) - throw TypeError(".google.cloud.dataproc.v1.ReservationAffinity.values: array expected"); - message.values = []; - for (var i = 0; i < object.values.length; ++i) - message.values[i] = String(object.values[i]); - } + var message = new $root.google.cloud.dataproc.v1.DiagnoseClusterRequest(); + if (object.projectId != null) + message.projectId = String(object.projectId); + if (object.region != null) + message.region = String(object.region); + if (object.clusterName != null) + message.clusterName = String(object.clusterName); return message; }; /** - * Creates a plain object from a 
ReservationAffinity message. Also converts values to other types if specified. + * Creates a plain object from a DiagnoseClusterRequest message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @static - * @param {google.cloud.dataproc.v1.ReservationAffinity} message ReservationAffinity + * @param {google.cloud.dataproc.v1.DiagnoseClusterRequest} message DiagnoseClusterRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReservationAffinity.toObject = function toObject(message, options) { + DiagnoseClusterRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) - object.values = []; if (options.defaults) { - object.consumeReservationType = options.enums === String ? "TYPE_UNSPECIFIED" : 0; - object.key = ""; - } - if (message.consumeReservationType != null && message.hasOwnProperty("consumeReservationType")) - object.consumeReservationType = options.enums === String ? $root.google.cloud.dataproc.v1.ReservationAffinity.Type[message.consumeReservationType] === undefined ? 
message.consumeReservationType : $root.google.cloud.dataproc.v1.ReservationAffinity.Type[message.consumeReservationType] : message.consumeReservationType; - if (message.key != null && message.hasOwnProperty("key")) - object.key = message.key; - if (message.values && message.values.length) { - object.values = []; - for (var j = 0; j < message.values.length; ++j) - object.values[j] = message.values[j]; + object.projectId = ""; + object.clusterName = ""; + object.region = ""; } - return object; - }; - + if (message.projectId != null && message.hasOwnProperty("projectId")) + object.projectId = message.projectId; + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + object.clusterName = message.clusterName; + if (message.region != null && message.hasOwnProperty("region")) + object.region = message.region; + return object; + }; + /** - * Converts this ReservationAffinity to JSON. + * Converts this DiagnoseClusterRequest to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @instance * @returns {Object.} JSON object */ - ReservationAffinity.prototype.toJSON = function toJSON() { + DiagnoseClusterRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReservationAffinity + * Gets the default type url for DiagnoseClusterRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @memberof google.cloud.dataproc.v1.DiagnoseClusterRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReservationAffinity.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + DiagnoseClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + 
"/google.cloud.dataproc.v1.ReservationAffinity"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.DiagnoseClusterRequest"; }; - /** - * Type enum. - * @name google.cloud.dataproc.v1.ReservationAffinity.Type - * @enum {number} - * @property {number} TYPE_UNSPECIFIED=0 TYPE_UNSPECIFIED value - * @property {number} NO_RESERVATION=1 NO_RESERVATION value - * @property {number} ANY_RESERVATION=2 ANY_RESERVATION value - * @property {number} SPECIFIC_RESERVATION=3 SPECIFIC_RESERVATION value - */ - ReservationAffinity.Type = (function() { - var valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "TYPE_UNSPECIFIED"] = 0; - values[valuesById[1] = "NO_RESERVATION"] = 1; - values[valuesById[2] = "ANY_RESERVATION"] = 2; - values[valuesById[3] = "SPECIFIC_RESERVATION"] = 3; - return values; - })(); - - return ReservationAffinity; + return DiagnoseClusterRequest; })(); - v1.JobController = (function() { + v1.DiagnoseClusterResults = (function() { /** - * Constructs a new JobController service. + * Properties of a DiagnoseClusterResults. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a JobController - * @extends $protobuf.rpc.Service + * @interface IDiagnoseClusterResults + * @property {string|null} [outputUri] DiagnoseClusterResults outputUri + */ + + /** + * Constructs a new DiagnoseClusterResults. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a DiagnoseClusterResults. 
+ * @implements IDiagnoseClusterResults * @constructor - * @param {$protobuf.RPCImpl} rpcImpl RPC implementation - * @param {boolean} [requestDelimited=false] Whether requests are length-delimited - * @param {boolean} [responseDelimited=false] Whether responses are length-delimited + * @param {google.cloud.dataproc.v1.IDiagnoseClusterResults=} [properties] Properties to set */ - function JobController(rpcImpl, requestDelimited, responseDelimited) { - $protobuf.rpc.Service.call(this, rpcImpl, requestDelimited, responseDelimited); + function DiagnoseClusterResults(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; } - (JobController.prototype = Object.create($protobuf.rpc.Service.prototype)).constructor = JobController; + /** + * DiagnoseClusterResults outputUri. + * @member {string} outputUri + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @instance + */ + DiagnoseClusterResults.prototype.outputUri = ""; /** - * Creates new JobController service using the specified rpc implementation. + * Creates a new DiagnoseClusterResults instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.JobController + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults * @static - * @param {$protobuf.RPCImpl} rpcImpl RPC implementation - * @param {boolean} [requestDelimited=false] Whether requests are length-delimited - * @param {boolean} [responseDelimited=false] Whether responses are length-delimited - * @returns {JobController} RPC service. Useful where requests and/or responses are streamed. 
+ * @param {google.cloud.dataproc.v1.IDiagnoseClusterResults=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.DiagnoseClusterResults} DiagnoseClusterResults instance */ - JobController.create = function create(rpcImpl, requestDelimited, responseDelimited) { - return new this(rpcImpl, requestDelimited, responseDelimited); + DiagnoseClusterResults.create = function create(properties) { + return new DiagnoseClusterResults(properties); }; /** - * Callback as used by {@link google.cloud.dataproc.v1.JobController|submitJob}. - * @memberof google.cloud.dataproc.v1.JobController - * @typedef SubmitJobCallback - * @type {function} - * @param {Error|null} error Error, if any - * @param {google.cloud.dataproc.v1.Job} [response] Job + * Encodes the specified DiagnoseClusterResults message. Does not implicitly {@link google.cloud.dataproc.v1.DiagnoseClusterResults.verify|verify} messages. + * @function encode + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @static + * @param {google.cloud.dataproc.v1.IDiagnoseClusterResults} message DiagnoseClusterResults message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ + DiagnoseClusterResults.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.outputUri != null && Object.hasOwnProperty.call(message, "outputUri")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.outputUri); + return writer; + }; /** - * Calls SubmitJob. 
- * @function submitJob - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.ISubmitJobRequest} request SubmitJobRequest message or plain object - * @param {google.cloud.dataproc.v1.JobController.SubmitJobCallback} callback Node-style callback called with the error, if any, and Job - * @returns {undefined} - * @variation 1 + * Encodes the specified DiagnoseClusterResults message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DiagnoseClusterResults.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @static + * @param {google.cloud.dataproc.v1.IDiagnoseClusterResults} message DiagnoseClusterResults message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - Object.defineProperty(JobController.prototype.submitJob = function submitJob(request, callback) { - return this.rpcCall(submitJob, $root.google.cloud.dataproc.v1.SubmitJobRequest, $root.google.cloud.dataproc.v1.Job, request, callback); - }, "name", { value: "SubmitJob" }); + DiagnoseClusterResults.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; /** - * Calls SubmitJob. - * @function submitJob - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.ISubmitJobRequest} request SubmitJobRequest message or plain object - * @returns {Promise} Promise - * @variation 2 + * Decodes a DiagnoseClusterResults message from the specified reader or buffer. 
+ * @function decode + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.DiagnoseClusterResults} DiagnoseClusterResults + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ + DiagnoseClusterResults.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DiagnoseClusterResults(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.outputUri = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; /** - * Callback as used by {@link google.cloud.dataproc.v1.JobController|submitJobAsOperation}. - * @memberof google.cloud.dataproc.v1.JobController - * @typedef SubmitJobAsOperationCallback - * @type {function} - * @param {Error|null} error Error, if any - * @param {google.longrunning.Operation} [response] Operation + * Decodes a DiagnoseClusterResults message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.dataproc.v1.DiagnoseClusterResults} DiagnoseClusterResults + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ + DiagnoseClusterResults.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; /** - * Calls SubmitJobAsOperation. - * @function submitJobAsOperation - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.ISubmitJobRequest} request SubmitJobRequest message or plain object - * @param {google.cloud.dataproc.v1.JobController.SubmitJobAsOperationCallback} callback Node-style callback called with the error, if any, and Operation - * @returns {undefined} - * @variation 1 + * Verifies a DiagnoseClusterResults message. 
+ * @function verify + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Object.defineProperty(JobController.prototype.submitJobAsOperation = function submitJobAsOperation(request, callback) { - return this.rpcCall(submitJobAsOperation, $root.google.cloud.dataproc.v1.SubmitJobRequest, $root.google.longrunning.Operation, request, callback); - }, "name", { value: "SubmitJobAsOperation" }); + DiagnoseClusterResults.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.outputUri != null && message.hasOwnProperty("outputUri")) + if (!$util.isString(message.outputUri)) + return "outputUri: string expected"; + return null; + }; /** - * Calls SubmitJobAsOperation. - * @function submitJobAsOperation - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.ISubmitJobRequest} request SubmitJobRequest message or plain object - * @returns {Promise} Promise - * @variation 2 + * Creates a DiagnoseClusterResults message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.DiagnoseClusterResults} DiagnoseClusterResults */ + DiagnoseClusterResults.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.DiagnoseClusterResults) + return object; + var message = new $root.google.cloud.dataproc.v1.DiagnoseClusterResults(); + if (object.outputUri != null) + message.outputUri = String(object.outputUri); + return message; + }; /** - * Callback as used by {@link google.cloud.dataproc.v1.JobController|getJob}. 
- * @memberof google.cloud.dataproc.v1.JobController - * @typedef GetJobCallback - * @type {function} - * @param {Error|null} error Error, if any - * @param {google.cloud.dataproc.v1.Job} [response] Job + * Creates a plain object from a DiagnoseClusterResults message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @static + * @param {google.cloud.dataproc.v1.DiagnoseClusterResults} message DiagnoseClusterResults + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object */ + DiagnoseClusterResults.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) + object.outputUri = ""; + if (message.outputUri != null && message.hasOwnProperty("outputUri")) + object.outputUri = message.outputUri; + return object; + }; /** - * Calls GetJob. - * @function getJob - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.IGetJobRequest} request GetJobRequest message or plain object - * @param {google.cloud.dataproc.v1.JobController.GetJobCallback} callback Node-style callback called with the error, if any, and Job - * @returns {undefined} - * @variation 1 - */ - Object.defineProperty(JobController.prototype.getJob = function getJob(request, callback) { - return this.rpcCall(getJob, $root.google.cloud.dataproc.v1.GetJobRequest, $root.google.cloud.dataproc.v1.Job, request, callback); - }, "name", { value: "GetJob" }); - - /** - * Calls GetJob. - * @function getJob - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.IGetJobRequest} request GetJobRequest message or plain object - * @returns {Promise} Promise - * @variation 2 - */ - - /** - * Callback as used by {@link google.cloud.dataproc.v1.JobController|listJobs}. 
- * @memberof google.cloud.dataproc.v1.JobController - * @typedef ListJobsCallback - * @type {function} - * @param {Error|null} error Error, if any - * @param {google.cloud.dataproc.v1.ListJobsResponse} [response] ListJobsResponse - */ - - /** - * Calls ListJobs. - * @function listJobs - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.IListJobsRequest} request ListJobsRequest message or plain object - * @param {google.cloud.dataproc.v1.JobController.ListJobsCallback} callback Node-style callback called with the error, if any, and ListJobsResponse - * @returns {undefined} - * @variation 1 - */ - Object.defineProperty(JobController.prototype.listJobs = function listJobs(request, callback) { - return this.rpcCall(listJobs, $root.google.cloud.dataproc.v1.ListJobsRequest, $root.google.cloud.dataproc.v1.ListJobsResponse, request, callback); - }, "name", { value: "ListJobs" }); - - /** - * Calls ListJobs. - * @function listJobs - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.IListJobsRequest} request ListJobsRequest message or plain object - * @returns {Promise} Promise - * @variation 2 - */ - - /** - * Callback as used by {@link google.cloud.dataproc.v1.JobController|updateJob}. - * @memberof google.cloud.dataproc.v1.JobController - * @typedef UpdateJobCallback - * @type {function} - * @param {Error|null} error Error, if any - * @param {google.cloud.dataproc.v1.Job} [response] Job - */ - - /** - * Calls UpdateJob. 
- * @function updateJob - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.IUpdateJobRequest} request UpdateJobRequest message or plain object - * @param {google.cloud.dataproc.v1.JobController.UpdateJobCallback} callback Node-style callback called with the error, if any, and Job - * @returns {undefined} - * @variation 1 - */ - Object.defineProperty(JobController.prototype.updateJob = function updateJob(request, callback) { - return this.rpcCall(updateJob, $root.google.cloud.dataproc.v1.UpdateJobRequest, $root.google.cloud.dataproc.v1.Job, request, callback); - }, "name", { value: "UpdateJob" }); - - /** - * Calls UpdateJob. - * @function updateJob - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.IUpdateJobRequest} request UpdateJobRequest message or plain object - * @returns {Promise} Promise - * @variation 2 - */ - - /** - * Callback as used by {@link google.cloud.dataproc.v1.JobController|cancelJob}. - * @memberof google.cloud.dataproc.v1.JobController - * @typedef CancelJobCallback - * @type {function} - * @param {Error|null} error Error, if any - * @param {google.cloud.dataproc.v1.Job} [response] Job - */ - - /** - * Calls CancelJob. - * @function cancelJob - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.ICancelJobRequest} request CancelJobRequest message or plain object - * @param {google.cloud.dataproc.v1.JobController.CancelJobCallback} callback Node-style callback called with the error, if any, and Job - * @returns {undefined} - * @variation 1 - */ - Object.defineProperty(JobController.prototype.cancelJob = function cancelJob(request, callback) { - return this.rpcCall(cancelJob, $root.google.cloud.dataproc.v1.CancelJobRequest, $root.google.cloud.dataproc.v1.Job, request, callback); - }, "name", { value: "CancelJob" }); - - /** - * Calls CancelJob. 
- * @function cancelJob - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.ICancelJobRequest} request CancelJobRequest message or plain object - * @returns {Promise} Promise - * @variation 2 - */ - - /** - * Callback as used by {@link google.cloud.dataproc.v1.JobController|deleteJob}. - * @memberof google.cloud.dataproc.v1.JobController - * @typedef DeleteJobCallback - * @type {function} - * @param {Error|null} error Error, if any - * @param {google.protobuf.Empty} [response] Empty - */ - - /** - * Calls DeleteJob. - * @function deleteJob - * @memberof google.cloud.dataproc.v1.JobController + * Converts this DiagnoseClusterResults to JSON. + * @function toJSON + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults * @instance - * @param {google.cloud.dataproc.v1.IDeleteJobRequest} request DeleteJobRequest message or plain object - * @param {google.cloud.dataproc.v1.JobController.DeleteJobCallback} callback Node-style callback called with the error, if any, and Empty - * @returns {undefined} - * @variation 1 + * @returns {Object.} JSON object */ - Object.defineProperty(JobController.prototype.deleteJob = function deleteJob(request, callback) { - return this.rpcCall(deleteJob, $root.google.cloud.dataproc.v1.DeleteJobRequest, $root.google.protobuf.Empty, request, callback); - }, "name", { value: "DeleteJob" }); + DiagnoseClusterResults.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; /** - * Calls DeleteJob. 
- * @function deleteJob - * @memberof google.cloud.dataproc.v1.JobController - * @instance - * @param {google.cloud.dataproc.v1.IDeleteJobRequest} request DeleteJobRequest message or plain object - * @returns {Promise} Promise - * @variation 2 + * Gets the default type url for DiagnoseClusterResults + * @function getTypeUrl + * @memberof google.cloud.dataproc.v1.DiagnoseClusterResults + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url */ + DiagnoseClusterResults.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.dataproc.v1.DiagnoseClusterResults"; + }; - return JobController; + return DiagnoseClusterResults; })(); - v1.LoggingConfig = (function() { + v1.ReservationAffinity = (function() { /** - * Properties of a LoggingConfig. + * Properties of a ReservationAffinity. * @memberof google.cloud.dataproc.v1 - * @interface ILoggingConfig - * @property {Object.|null} [driverLogLevels] LoggingConfig driverLogLevels + * @interface IReservationAffinity + * @property {google.cloud.dataproc.v1.ReservationAffinity.Type|null} [consumeReservationType] ReservationAffinity consumeReservationType + * @property {string|null} [key] ReservationAffinity key + * @property {Array.|null} [values] ReservationAffinity values */ /** - * Constructs a new LoggingConfig. + * Constructs a new ReservationAffinity. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a LoggingConfig. - * @implements ILoggingConfig + * @classdesc Represents a ReservationAffinity. 
+ * @implements IReservationAffinity * @constructor - * @param {google.cloud.dataproc.v1.ILoggingConfig=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IReservationAffinity=} [properties] Properties to set */ - function LoggingConfig(properties) { - this.driverLogLevels = {}; + function ReservationAffinity(properties) { + this.values = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -22405,95 +22476,106 @@ } /** - * LoggingConfig driverLogLevels. - * @member {Object.} driverLogLevels - * @memberof google.cloud.dataproc.v1.LoggingConfig + * ReservationAffinity consumeReservationType. + * @member {google.cloud.dataproc.v1.ReservationAffinity.Type} consumeReservationType + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @instance */ - LoggingConfig.prototype.driverLogLevels = $util.emptyObject; + ReservationAffinity.prototype.consumeReservationType = 0; /** - * Creates a new LoggingConfig instance using the specified properties. + * ReservationAffinity key. + * @member {string} key + * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @instance + */ + ReservationAffinity.prototype.key = ""; + + /** + * ReservationAffinity values. + * @member {Array.} values + * @memberof google.cloud.dataproc.v1.ReservationAffinity + * @instance + */ + ReservationAffinity.prototype.values = $util.emptyArray; + + /** + * Creates a new ReservationAffinity instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.LoggingConfig + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @static - * @param {google.cloud.dataproc.v1.ILoggingConfig=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.LoggingConfig} LoggingConfig instance + * @param {google.cloud.dataproc.v1.IReservationAffinity=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.ReservationAffinity} ReservationAffinity instance */ - LoggingConfig.create = function create(properties) { - return new LoggingConfig(properties); + ReservationAffinity.create = function create(properties) { + return new ReservationAffinity(properties); }; /** - * Encodes the specified LoggingConfig message. Does not implicitly {@link google.cloud.dataproc.v1.LoggingConfig.verify|verify} messages. + * Encodes the specified ReservationAffinity message. Does not implicitly {@link google.cloud.dataproc.v1.ReservationAffinity.verify|verify} messages. * @function encode - * @memberof google.cloud.dataproc.v1.LoggingConfig + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @static - * @param {google.cloud.dataproc.v1.ILoggingConfig} message LoggingConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.IReservationAffinity} message ReservationAffinity message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LoggingConfig.encode = function encode(message, writer) { + ReservationAffinity.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.driverLogLevels != null && Object.hasOwnProperty.call(message, "driverLogLevels")) - for (var keys = Object.keys(message.driverLogLevels), i = 0; i < keys.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).int32(message.driverLogLevels[keys[i]]).ldelim(); + if 
(message.consumeReservationType != null && Object.hasOwnProperty.call(message, "consumeReservationType")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.consumeReservationType); + if (message.key != null && Object.hasOwnProperty.call(message, "key")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.key); + if (message.values != null && message.values.length) + for (var i = 0; i < message.values.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.values[i]); return writer; }; /** - * Encodes the specified LoggingConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.LoggingConfig.verify|verify} messages. + * Encodes the specified ReservationAffinity message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ReservationAffinity.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.LoggingConfig + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @static - * @param {google.cloud.dataproc.v1.ILoggingConfig} message LoggingConfig message or plain object to encode + * @param {google.cloud.dataproc.v1.IReservationAffinity} message ReservationAffinity message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LoggingConfig.encodeDelimited = function encodeDelimited(message, writer) { + ReservationAffinity.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a LoggingConfig message from the specified reader or buffer. + * Decodes a ReservationAffinity message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.LoggingConfig + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.LoggingConfig} LoggingConfig + * @returns {google.cloud.dataproc.v1.ReservationAffinity} ReservationAffinity * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LoggingConfig.decode = function decode(reader, length) { + ReservationAffinity.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.LoggingConfig(), key, value; + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ReservationAffinity(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.consumeReservationType = reader.int32(); + break; + } case 2: { - if (message.driverLogLevels === $util.emptyObject) - message.driverLogLevels = {}; - var end2 = reader.uint32() + reader.pos; - key = ""; - value = 0; - while (reader.pos < end2) { - var tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = reader.int32(); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.driverLogLevels[key] = value; + message.key = reader.string(); + break; + } + case 3: { + if (!(message.values && message.values.length)) + message.values = []; + message.values.push(reader.string()); break; } default: @@ -22505,432 +22587,547 @@ }; /** - * Decodes a LoggingConfig message from the specified reader or buffer, length delimited. 
+ * Decodes a ReservationAffinity message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.LoggingConfig + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.LoggingConfig} LoggingConfig + * @returns {google.cloud.dataproc.v1.ReservationAffinity} ReservationAffinity * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LoggingConfig.decodeDelimited = function decodeDelimited(reader) { + ReservationAffinity.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a LoggingConfig message. + * Verifies a ReservationAffinity message. * @function verify - * @memberof google.cloud.dataproc.v1.LoggingConfig + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - LoggingConfig.verify = function verify(message) { + ReservationAffinity.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.driverLogLevels != null && message.hasOwnProperty("driverLogLevels")) { - if (!$util.isObject(message.driverLogLevels)) - return "driverLogLevels: object expected"; - var key = Object.keys(message.driverLogLevels); - for (var i = 0; i < key.length; ++i) - switch (message.driverLogLevels[key[i]]) { - default: - return "driverLogLevels: enum value{k:string} expected"; - case 0: - case 1: - case 2: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - break; - } + if (message.consumeReservationType != null && message.hasOwnProperty("consumeReservationType")) 
+ switch (message.consumeReservationType) { + default: + return "consumeReservationType: enum value expected"; + case 0: + case 1: + case 2: + case 3: + break; + } + if (message.key != null && message.hasOwnProperty("key")) + if (!$util.isString(message.key)) + return "key: string expected"; + if (message.values != null && message.hasOwnProperty("values")) { + if (!Array.isArray(message.values)) + return "values: array expected"; + for (var i = 0; i < message.values.length; ++i) + if (!$util.isString(message.values[i])) + return "values: string[] expected"; } return null; }; /** - * Creates a LoggingConfig message from a plain object. Also converts values to their respective internal types. + * Creates a ReservationAffinity message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.LoggingConfig + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.LoggingConfig} LoggingConfig + * @returns {google.cloud.dataproc.v1.ReservationAffinity} ReservationAffinity */ - LoggingConfig.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.LoggingConfig) + ReservationAffinity.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.ReservationAffinity) return object; - var message = new $root.google.cloud.dataproc.v1.LoggingConfig(); - if (object.driverLogLevels) { - if (typeof object.driverLogLevels !== "object") - throw TypeError(".google.cloud.dataproc.v1.LoggingConfig.driverLogLevels: object expected"); - message.driverLogLevels = {}; - for (var keys = Object.keys(object.driverLogLevels), i = 0; i < keys.length; ++i) - switch (object.driverLogLevels[keys[i]]) { - default: - if (typeof object.driverLogLevels[keys[i]] === "number") { - message.driverLogLevels[keys[i]] = object.driverLogLevels[keys[i]]; - break; - 
} - break; - case "LEVEL_UNSPECIFIED": - case 0: - message.driverLogLevels[keys[i]] = 0; - break; - case "ALL": - case 1: - message.driverLogLevels[keys[i]] = 1; - break; - case "TRACE": - case 2: - message.driverLogLevels[keys[i]] = 2; - break; - case "DEBUG": - case 3: - message.driverLogLevels[keys[i]] = 3; - break; - case "INFO": - case 4: - message.driverLogLevels[keys[i]] = 4; - break; - case "WARN": - case 5: - message.driverLogLevels[keys[i]] = 5; - break; - case "ERROR": - case 6: - message.driverLogLevels[keys[i]] = 6; - break; - case "FATAL": - case 7: - message.driverLogLevels[keys[i]] = 7; - break; - case "OFF": - case 8: - message.driverLogLevels[keys[i]] = 8; - break; - } + var message = new $root.google.cloud.dataproc.v1.ReservationAffinity(); + switch (object.consumeReservationType) { + default: + if (typeof object.consumeReservationType === "number") { + message.consumeReservationType = object.consumeReservationType; + break; + } + break; + case "TYPE_UNSPECIFIED": + case 0: + message.consumeReservationType = 0; + break; + case "NO_RESERVATION": + case 1: + message.consumeReservationType = 1; + break; + case "ANY_RESERVATION": + case 2: + message.consumeReservationType = 2; + break; + case "SPECIFIC_RESERVATION": + case 3: + message.consumeReservationType = 3; + break; + } + if (object.key != null) + message.key = String(object.key); + if (object.values) { + if (!Array.isArray(object.values)) + throw TypeError(".google.cloud.dataproc.v1.ReservationAffinity.values: array expected"); + message.values = []; + for (var i = 0; i < object.values.length; ++i) + message.values[i] = String(object.values[i]); } return message; }; /** - * Creates a plain object from a LoggingConfig message. Also converts values to other types if specified. + * Creates a plain object from a ReservationAffinity message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.LoggingConfig + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @static - * @param {google.cloud.dataproc.v1.LoggingConfig} message LoggingConfig + * @param {google.cloud.dataproc.v1.ReservationAffinity} message ReservationAffinity * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - LoggingConfig.toObject = function toObject(message, options) { + ReservationAffinity.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.objects || options.defaults) - object.driverLogLevels = {}; - var keys2; - if (message.driverLogLevels && (keys2 = Object.keys(message.driverLogLevels)).length) { - object.driverLogLevels = {}; - for (var j = 0; j < keys2.length; ++j) - object.driverLogLevels[keys2[j]] = options.enums === String ? $root.google.cloud.dataproc.v1.LoggingConfig.Level[message.driverLogLevels[keys2[j]]] === undefined ? message.driverLogLevels[keys2[j]] : $root.google.cloud.dataproc.v1.LoggingConfig.Level[message.driverLogLevels[keys2[j]]] : message.driverLogLevels[keys2[j]]; + if (options.arrays || options.defaults) + object.values = []; + if (options.defaults) { + object.consumeReservationType = options.enums === String ? "TYPE_UNSPECIFIED" : 0; + object.key = ""; + } + if (message.consumeReservationType != null && message.hasOwnProperty("consumeReservationType")) + object.consumeReservationType = options.enums === String ? $root.google.cloud.dataproc.v1.ReservationAffinity.Type[message.consumeReservationType] === undefined ? 
message.consumeReservationType : $root.google.cloud.dataproc.v1.ReservationAffinity.Type[message.consumeReservationType] : message.consumeReservationType; + if (message.key != null && message.hasOwnProperty("key")) + object.key = message.key; + if (message.values && message.values.length) { + object.values = []; + for (var j = 0; j < message.values.length; ++j) + object.values[j] = message.values[j]; } return object; }; /** - * Converts this LoggingConfig to JSON. + * Converts this ReservationAffinity to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.LoggingConfig + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @instance * @returns {Object.} JSON object */ - LoggingConfig.prototype.toJSON = function toJSON() { + ReservationAffinity.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for LoggingConfig + * Gets the default type url for ReservationAffinity * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.LoggingConfig + * @memberof google.cloud.dataproc.v1.ReservationAffinity * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - LoggingConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReservationAffinity.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.LoggingConfig"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.ReservationAffinity"; }; /** - * Level enum. - * @name google.cloud.dataproc.v1.LoggingConfig.Level + * Type enum. 
+ * @name google.cloud.dataproc.v1.ReservationAffinity.Type * @enum {number} - * @property {number} LEVEL_UNSPECIFIED=0 LEVEL_UNSPECIFIED value - * @property {number} ALL=1 ALL value - * @property {number} TRACE=2 TRACE value - * @property {number} DEBUG=3 DEBUG value - * @property {number} INFO=4 INFO value - * @property {number} WARN=5 WARN value - * @property {number} ERROR=6 ERROR value - * @property {number} FATAL=7 FATAL value - * @property {number} OFF=8 OFF value + * @property {number} TYPE_UNSPECIFIED=0 TYPE_UNSPECIFIED value + * @property {number} NO_RESERVATION=1 NO_RESERVATION value + * @property {number} ANY_RESERVATION=2 ANY_RESERVATION value + * @property {number} SPECIFIC_RESERVATION=3 SPECIFIC_RESERVATION value */ - LoggingConfig.Level = (function() { + ReservationAffinity.Type = (function() { var valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "LEVEL_UNSPECIFIED"] = 0; - values[valuesById[1] = "ALL"] = 1; - values[valuesById[2] = "TRACE"] = 2; - values[valuesById[3] = "DEBUG"] = 3; - values[valuesById[4] = "INFO"] = 4; - values[valuesById[5] = "WARN"] = 5; - values[valuesById[6] = "ERROR"] = 6; - values[valuesById[7] = "FATAL"] = 7; - values[valuesById[8] = "OFF"] = 8; - return values; - })(); - - return LoggingConfig; + values[valuesById[0] = "TYPE_UNSPECIFIED"] = 0; + values[valuesById[1] = "NO_RESERVATION"] = 1; + values[valuesById[2] = "ANY_RESERVATION"] = 2; + values[valuesById[3] = "SPECIFIC_RESERVATION"] = 3; + return values; + })(); + + return ReservationAffinity; })(); - v1.HadoopJob = (function() { + v1.JobController = (function() { /** - * Properties of a HadoopJob. + * Constructs a new JobController service. 
* @memberof google.cloud.dataproc.v1 - * @interface IHadoopJob - * @property {string|null} [mainJarFileUri] HadoopJob mainJarFileUri - * @property {string|null} [mainClass] HadoopJob mainClass - * @property {Array.|null} [args] HadoopJob args - * @property {Array.|null} [jarFileUris] HadoopJob jarFileUris - * @property {Array.|null} [fileUris] HadoopJob fileUris - * @property {Array.|null} [archiveUris] HadoopJob archiveUris - * @property {Object.|null} [properties] HadoopJob properties - * @property {google.cloud.dataproc.v1.ILoggingConfig|null} [loggingConfig] HadoopJob loggingConfig + * @classdesc Represents a JobController + * @extends $protobuf.rpc.Service + * @constructor + * @param {$protobuf.RPCImpl} rpcImpl RPC implementation + * @param {boolean} [requestDelimited=false] Whether requests are length-delimited + * @param {boolean} [responseDelimited=false] Whether responses are length-delimited */ + function JobController(rpcImpl, requestDelimited, responseDelimited) { + $protobuf.rpc.Service.call(this, rpcImpl, requestDelimited, responseDelimited); + } + + (JobController.prototype = Object.create($protobuf.rpc.Service.prototype)).constructor = JobController; /** - * Constructs a new HadoopJob. - * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a HadoopJob. - * @implements IHadoopJob - * @constructor - * @param {google.cloud.dataproc.v1.IHadoopJob=} [properties] Properties to set + * Creates new JobController service using the specified rpc implementation. + * @function create + * @memberof google.cloud.dataproc.v1.JobController + * @static + * @param {$protobuf.RPCImpl} rpcImpl RPC implementation + * @param {boolean} [requestDelimited=false] Whether requests are length-delimited + * @param {boolean} [responseDelimited=false] Whether responses are length-delimited + * @returns {JobController} RPC service. Useful where requests and/or responses are streamed. 
*/ - function HadoopJob(properties) { - this.args = []; - this.jarFileUris = []; - this.fileUris = []; - this.archiveUris = []; - this.properties = {}; - if (properties) - for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + JobController.create = function create(rpcImpl, requestDelimited, responseDelimited) { + return new this(rpcImpl, requestDelimited, responseDelimited); + }; /** - * HadoopJob mainJarFileUri. - * @member {string|null|undefined} mainJarFileUri - * @memberof google.cloud.dataproc.v1.HadoopJob - * @instance + * Callback as used by {@link google.cloud.dataproc.v1.JobController|submitJob}. + * @memberof google.cloud.dataproc.v1.JobController + * @typedef SubmitJobCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.cloud.dataproc.v1.Job} [response] Job */ - HadoopJob.prototype.mainJarFileUri = null; /** - * HadoopJob mainClass. - * @member {string|null|undefined} mainClass - * @memberof google.cloud.dataproc.v1.HadoopJob + * Calls SubmitJob. + * @function submitJob + * @memberof google.cloud.dataproc.v1.JobController * @instance + * @param {google.cloud.dataproc.v1.ISubmitJobRequest} request SubmitJobRequest message or plain object + * @param {google.cloud.dataproc.v1.JobController.SubmitJobCallback} callback Node-style callback called with the error, if any, and Job + * @returns {undefined} + * @variation 1 */ - HadoopJob.prototype.mainClass = null; + Object.defineProperty(JobController.prototype.submitJob = function submitJob(request, callback) { + return this.rpcCall(submitJob, $root.google.cloud.dataproc.v1.SubmitJobRequest, $root.google.cloud.dataproc.v1.Job, request, callback); + }, "name", { value: "SubmitJob" }); /** - * HadoopJob args. - * @member {Array.} args - * @memberof google.cloud.dataproc.v1.HadoopJob + * Calls SubmitJob. 
+ * @function submitJob + * @memberof google.cloud.dataproc.v1.JobController * @instance + * @param {google.cloud.dataproc.v1.ISubmitJobRequest} request SubmitJobRequest message or plain object + * @returns {Promise} Promise + * @variation 2 */ - HadoopJob.prototype.args = $util.emptyArray; /** - * HadoopJob jarFileUris. - * @member {Array.} jarFileUris - * @memberof google.cloud.dataproc.v1.HadoopJob - * @instance + * Callback as used by {@link google.cloud.dataproc.v1.JobController|submitJobAsOperation}. + * @memberof google.cloud.dataproc.v1.JobController + * @typedef SubmitJobAsOperationCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.longrunning.Operation} [response] Operation */ - HadoopJob.prototype.jarFileUris = $util.emptyArray; /** - * HadoopJob fileUris. - * @member {Array.} fileUris - * @memberof google.cloud.dataproc.v1.HadoopJob + * Calls SubmitJobAsOperation. + * @function submitJobAsOperation + * @memberof google.cloud.dataproc.v1.JobController * @instance + * @param {google.cloud.dataproc.v1.ISubmitJobRequest} request SubmitJobRequest message or plain object + * @param {google.cloud.dataproc.v1.JobController.SubmitJobAsOperationCallback} callback Node-style callback called with the error, if any, and Operation + * @returns {undefined} + * @variation 1 */ - HadoopJob.prototype.fileUris = $util.emptyArray; + Object.defineProperty(JobController.prototype.submitJobAsOperation = function submitJobAsOperation(request, callback) { + return this.rpcCall(submitJobAsOperation, $root.google.cloud.dataproc.v1.SubmitJobRequest, $root.google.longrunning.Operation, request, callback); + }, "name", { value: "SubmitJobAsOperation" }); /** - * HadoopJob archiveUris. - * @member {Array.} archiveUris - * @memberof google.cloud.dataproc.v1.HadoopJob + * Calls SubmitJobAsOperation. 
+ * @function submitJobAsOperation + * @memberof google.cloud.dataproc.v1.JobController * @instance + * @param {google.cloud.dataproc.v1.ISubmitJobRequest} request SubmitJobRequest message or plain object + * @returns {Promise} Promise + * @variation 2 */ - HadoopJob.prototype.archiveUris = $util.emptyArray; /** - * HadoopJob properties. - * @member {Object.} properties - * @memberof google.cloud.dataproc.v1.HadoopJob - * @instance + * Callback as used by {@link google.cloud.dataproc.v1.JobController|getJob}. + * @memberof google.cloud.dataproc.v1.JobController + * @typedef GetJobCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.cloud.dataproc.v1.Job} [response] Job */ - HadoopJob.prototype.properties = $util.emptyObject; /** - * HadoopJob loggingConfig. - * @member {google.cloud.dataproc.v1.ILoggingConfig|null|undefined} loggingConfig - * @memberof google.cloud.dataproc.v1.HadoopJob + * Calls GetJob. + * @function getJob + * @memberof google.cloud.dataproc.v1.JobController * @instance + * @param {google.cloud.dataproc.v1.IGetJobRequest} request GetJobRequest message or plain object + * @param {google.cloud.dataproc.v1.JobController.GetJobCallback} callback Node-style callback called with the error, if any, and Job + * @returns {undefined} + * @variation 1 */ - HadoopJob.prototype.loggingConfig = null; - - // OneOf field names bound to virtual getters and setters - var $oneOfFields; + Object.defineProperty(JobController.prototype.getJob = function getJob(request, callback) { + return this.rpcCall(getJob, $root.google.cloud.dataproc.v1.GetJobRequest, $root.google.cloud.dataproc.v1.Job, request, callback); + }, "name", { value: "GetJob" }); /** - * HadoopJob driver. - * @member {"mainJarFileUri"|"mainClass"|undefined} driver - * @memberof google.cloud.dataproc.v1.HadoopJob + * Calls GetJob. 
+ * @function getJob + * @memberof google.cloud.dataproc.v1.JobController * @instance + * @param {google.cloud.dataproc.v1.IGetJobRequest} request GetJobRequest message or plain object + * @returns {Promise} Promise + * @variation 2 */ - Object.defineProperty(HadoopJob.prototype, "driver", { - get: $util.oneOfGetter($oneOfFields = ["mainJarFileUri", "mainClass"]), - set: $util.oneOfSetter($oneOfFields) - }); /** - * Creates a new HadoopJob instance using the specified properties. - * @function create - * @memberof google.cloud.dataproc.v1.HadoopJob - * @static - * @param {google.cloud.dataproc.v1.IHadoopJob=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.HadoopJob} HadoopJob instance + * Callback as used by {@link google.cloud.dataproc.v1.JobController|listJobs}. + * @memberof google.cloud.dataproc.v1.JobController + * @typedef ListJobsCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.cloud.dataproc.v1.ListJobsResponse} [response] ListJobsResponse */ - HadoopJob.create = function create(properties) { - return new HadoopJob(properties); - }; /** - * Encodes the specified HadoopJob message. Does not implicitly {@link google.cloud.dataproc.v1.HadoopJob.verify|verify} messages. - * @function encode - * @memberof google.cloud.dataproc.v1.HadoopJob - * @static - * @param {google.cloud.dataproc.v1.IHadoopJob} message HadoopJob message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer + * Calls ListJobs. 
+ * @function listJobs + * @memberof google.cloud.dataproc.v1.JobController + * @instance + * @param {google.cloud.dataproc.v1.IListJobsRequest} request ListJobsRequest message or plain object + * @param {google.cloud.dataproc.v1.JobController.ListJobsCallback} callback Node-style callback called with the error, if any, and ListJobsResponse + * @returns {undefined} + * @variation 1 */ - HadoopJob.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.mainJarFileUri != null && Object.hasOwnProperty.call(message, "mainJarFileUri")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.mainJarFileUri); - if (message.mainClass != null && Object.hasOwnProperty.call(message, "mainClass")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.mainClass); - if (message.args != null && message.args.length) - for (var i = 0; i < message.args.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.args[i]); - if (message.jarFileUris != null && message.jarFileUris.length) - for (var i = 0; i < message.jarFileUris.length; ++i) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.jarFileUris[i]); - if (message.fileUris != null && message.fileUris.length) - for (var i = 0; i < message.fileUris.length; ++i) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.fileUris[i]); - if (message.archiveUris != null && message.archiveUris.length) - for (var i = 0; i < message.archiveUris.length; ++i) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.archiveUris[i]); - if (message.properties != null && Object.hasOwnProperty.call(message, "properties")) - for (var keys = Object.keys(message.properties), i = 0; i < keys.length; ++i) - writer.uint32(/* id 7, wireType 2 =*/58).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.properties[keys[i]]).ldelim(); - if (message.loggingConfig != null && Object.hasOwnProperty.call(message, "loggingConfig")) - 
$root.google.cloud.dataproc.v1.LoggingConfig.encode(message.loggingConfig, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); - return writer; - }; + Object.defineProperty(JobController.prototype.listJobs = function listJobs(request, callback) { + return this.rpcCall(listJobs, $root.google.cloud.dataproc.v1.ListJobsRequest, $root.google.cloud.dataproc.v1.ListJobsResponse, request, callback); + }, "name", { value: "ListJobs" }); /** - * Encodes the specified HadoopJob message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.HadoopJob.verify|verify} messages. - * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.HadoopJob - * @static - * @param {google.cloud.dataproc.v1.IHadoopJob} message HadoopJob message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer + * Calls ListJobs. + * @function listJobs + * @memberof google.cloud.dataproc.v1.JobController + * @instance + * @param {google.cloud.dataproc.v1.IListJobsRequest} request ListJobsRequest message or plain object + * @returns {Promise} Promise + * @variation 2 */ - HadoopJob.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; /** - * Decodes a HadoopJob message from the specified reader or buffer. - * @function decode - * @memberof google.cloud.dataproc.v1.HadoopJob - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.HadoopJob} HadoopJob - * @throws {Error} If the payload is not a reader or valid buffer + * Callback as used by {@link google.cloud.dataproc.v1.JobController|updateJob}. 
+ * @memberof google.cloud.dataproc.v1.JobController + * @typedef UpdateJobCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.cloud.dataproc.v1.Job} [response] Job + */ + + /** + * Calls UpdateJob. + * @function updateJob + * @memberof google.cloud.dataproc.v1.JobController + * @instance + * @param {google.cloud.dataproc.v1.IUpdateJobRequest} request UpdateJobRequest message or plain object + * @param {google.cloud.dataproc.v1.JobController.UpdateJobCallback} callback Node-style callback called with the error, if any, and Job + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(JobController.prototype.updateJob = function updateJob(request, callback) { + return this.rpcCall(updateJob, $root.google.cloud.dataproc.v1.UpdateJobRequest, $root.google.cloud.dataproc.v1.Job, request, callback); + }, "name", { value: "UpdateJob" }); + + /** + * Calls UpdateJob. + * @function updateJob + * @memberof google.cloud.dataproc.v1.JobController + * @instance + * @param {google.cloud.dataproc.v1.IUpdateJobRequest} request UpdateJobRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + + /** + * Callback as used by {@link google.cloud.dataproc.v1.JobController|cancelJob}. + * @memberof google.cloud.dataproc.v1.JobController + * @typedef CancelJobCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.cloud.dataproc.v1.Job} [response] Job + */ + + /** + * Calls CancelJob. 
+ * @function cancelJob + * @memberof google.cloud.dataproc.v1.JobController + * @instance + * @param {google.cloud.dataproc.v1.ICancelJobRequest} request CancelJobRequest message or plain object + * @param {google.cloud.dataproc.v1.JobController.CancelJobCallback} callback Node-style callback called with the error, if any, and Job + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(JobController.prototype.cancelJob = function cancelJob(request, callback) { + return this.rpcCall(cancelJob, $root.google.cloud.dataproc.v1.CancelJobRequest, $root.google.cloud.dataproc.v1.Job, request, callback); + }, "name", { value: "CancelJob" }); + + /** + * Calls CancelJob. + * @function cancelJob + * @memberof google.cloud.dataproc.v1.JobController + * @instance + * @param {google.cloud.dataproc.v1.ICancelJobRequest} request CancelJobRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + + /** + * Callback as used by {@link google.cloud.dataproc.v1.JobController|deleteJob}. + * @memberof google.cloud.dataproc.v1.JobController + * @typedef DeleteJobCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.protobuf.Empty} [response] Empty + */ + + /** + * Calls DeleteJob. + * @function deleteJob + * @memberof google.cloud.dataproc.v1.JobController + * @instance + * @param {google.cloud.dataproc.v1.IDeleteJobRequest} request DeleteJobRequest message or plain object + * @param {google.cloud.dataproc.v1.JobController.DeleteJobCallback} callback Node-style callback called with the error, if any, and Empty + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(JobController.prototype.deleteJob = function deleteJob(request, callback) { + return this.rpcCall(deleteJob, $root.google.cloud.dataproc.v1.DeleteJobRequest, $root.google.protobuf.Empty, request, callback); + }, "name", { value: "DeleteJob" }); + + /** + * Calls DeleteJob. 
+ * @function deleteJob + * @memberof google.cloud.dataproc.v1.JobController + * @instance + * @param {google.cloud.dataproc.v1.IDeleteJobRequest} request DeleteJobRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + + return JobController; + })(); + + v1.LoggingConfig = (function() { + + /** + * Properties of a LoggingConfig. + * @memberof google.cloud.dataproc.v1 + * @interface ILoggingConfig + * @property {Object.|null} [driverLogLevels] LoggingConfig driverLogLevels + */ + + /** + * Constructs a new LoggingConfig. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a LoggingConfig. + * @implements ILoggingConfig + * @constructor + * @param {google.cloud.dataproc.v1.ILoggingConfig=} [properties] Properties to set + */ + function LoggingConfig(properties) { + this.driverLogLevels = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * LoggingConfig driverLogLevels. + * @member {Object.} driverLogLevels + * @memberof google.cloud.dataproc.v1.LoggingConfig + * @instance + */ + LoggingConfig.prototype.driverLogLevels = $util.emptyObject; + + /** + * Creates a new LoggingConfig instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1.LoggingConfig + * @static + * @param {google.cloud.dataproc.v1.ILoggingConfig=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.LoggingConfig} LoggingConfig instance + */ + LoggingConfig.create = function create(properties) { + return new LoggingConfig(properties); + }; + + /** + * Encodes the specified LoggingConfig message. Does not implicitly {@link google.cloud.dataproc.v1.LoggingConfig.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.dataproc.v1.LoggingConfig + * @static + * @param {google.cloud.dataproc.v1.ILoggingConfig} message LoggingConfig message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + LoggingConfig.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.driverLogLevels != null && Object.hasOwnProperty.call(message, "driverLogLevels")) + for (var keys = Object.keys(message.driverLogLevels), i = 0; i < keys.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).int32(message.driverLogLevels[keys[i]]).ldelim(); + return writer; + }; + + /** + * Encodes the specified LoggingConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.LoggingConfig.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.LoggingConfig + * @static + * @param {google.cloud.dataproc.v1.ILoggingConfig} message LoggingConfig message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + LoggingConfig.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a LoggingConfig message from the specified reader or buffer. 
+ * @function decode + * @memberof google.cloud.dataproc.v1.LoggingConfig + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.LoggingConfig} LoggingConfig + * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - HadoopJob.decode = function decode(reader, length) { + LoggingConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.HadoopJob(), key, value; + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.LoggingConfig(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.mainJarFileUri = reader.string(); - break; - } case 2: { - message.mainClass = reader.string(); - break; - } - case 3: { - if (!(message.args && message.args.length)) - message.args = []; - message.args.push(reader.string()); - break; - } - case 4: { - if (!(message.jarFileUris && message.jarFileUris.length)) - message.jarFileUris = []; - message.jarFileUris.push(reader.string()); - break; - } - case 5: { - if (!(message.fileUris && message.fileUris.length)) - message.fileUris = []; - message.fileUris.push(reader.string()); - break; - } - case 6: { - if (!(message.archiveUris && message.archiveUris.length)) - message.archiveUris = []; - message.archiveUris.push(reader.string()); - break; - } - case 7: { - if (message.properties === $util.emptyObject) - message.properties = {}; + if (message.driverLogLevels === $util.emptyObject) + message.driverLogLevels = {}; var end2 = reader.uint32() + reader.pos; key = ""; - value = ""; + value = 0; while (reader.pos < end2) { var tag2 = 
reader.uint32(); switch (tag2 >>> 3) { @@ -22938,18 +23135,14 @@ key = reader.string(); break; case 2: - value = reader.string(); + value = reader.int32(); break; default: reader.skipType(tag2 & 7); break; } } - message.properties[key] = value; - break; - } - case 8: { - message.loggingConfig = $root.google.cloud.dataproc.v1.LoggingConfig.decode(reader, reader.uint32()); + message.driverLogLevels[key] = value; break; } default: @@ -22961,266 +23154,226 @@ }; /** - * Decodes a HadoopJob message from the specified reader or buffer, length delimited. + * Decodes a LoggingConfig message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.HadoopJob + * @memberof google.cloud.dataproc.v1.LoggingConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.HadoopJob} HadoopJob + * @returns {google.cloud.dataproc.v1.LoggingConfig} LoggingConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - HadoopJob.decodeDelimited = function decodeDelimited(reader) { + LoggingConfig.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a HadoopJob message. + * Verifies a LoggingConfig message. 
* @function verify - * @memberof google.cloud.dataproc.v1.HadoopJob + * @memberof google.cloud.dataproc.v1.LoggingConfig * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - HadoopJob.verify = function verify(message) { + LoggingConfig.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - var properties = {}; - if (message.mainJarFileUri != null && message.hasOwnProperty("mainJarFileUri")) { - properties.driver = 1; - if (!$util.isString(message.mainJarFileUri)) - return "mainJarFileUri: string expected"; - } - if (message.mainClass != null && message.hasOwnProperty("mainClass")) { - if (properties.driver === 1) - return "driver: multiple values"; - properties.driver = 1; - if (!$util.isString(message.mainClass)) - return "mainClass: string expected"; - } - if (message.args != null && message.hasOwnProperty("args")) { - if (!Array.isArray(message.args)) - return "args: array expected"; - for (var i = 0; i < message.args.length; ++i) - if (!$util.isString(message.args[i])) - return "args: string[] expected"; - } - if (message.jarFileUris != null && message.hasOwnProperty("jarFileUris")) { - if (!Array.isArray(message.jarFileUris)) - return "jarFileUris: array expected"; - for (var i = 0; i < message.jarFileUris.length; ++i) - if (!$util.isString(message.jarFileUris[i])) - return "jarFileUris: string[] expected"; - } - if (message.fileUris != null && message.hasOwnProperty("fileUris")) { - if (!Array.isArray(message.fileUris)) - return "fileUris: array expected"; - for (var i = 0; i < message.fileUris.length; ++i) - if (!$util.isString(message.fileUris[i])) - return "fileUris: string[] expected"; - } - if (message.archiveUris != null && message.hasOwnProperty("archiveUris")) { - if (!Array.isArray(message.archiveUris)) - return "archiveUris: array expected"; - for (var i = 0; i < message.archiveUris.length; ++i) - if 
(!$util.isString(message.archiveUris[i])) - return "archiveUris: string[] expected"; - } - if (message.properties != null && message.hasOwnProperty("properties")) { - if (!$util.isObject(message.properties)) - return "properties: object expected"; - var key = Object.keys(message.properties); + if (message.driverLogLevels != null && message.hasOwnProperty("driverLogLevels")) { + if (!$util.isObject(message.driverLogLevels)) + return "driverLogLevels: object expected"; + var key = Object.keys(message.driverLogLevels); for (var i = 0; i < key.length; ++i) - if (!$util.isString(message.properties[key[i]])) - return "properties: string{k:string} expected"; - } - if (message.loggingConfig != null && message.hasOwnProperty("loggingConfig")) { - var error = $root.google.cloud.dataproc.v1.LoggingConfig.verify(message.loggingConfig); - if (error) - return "loggingConfig." + error; + switch (message.driverLogLevels[key[i]]) { + default: + return "driverLogLevels: enum value{k:string} expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } } return null; }; /** - * Creates a HadoopJob message from a plain object. Also converts values to their respective internal types. + * Creates a LoggingConfig message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.HadoopJob + * @memberof google.cloud.dataproc.v1.LoggingConfig * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.HadoopJob} HadoopJob + * @returns {google.cloud.dataproc.v1.LoggingConfig} LoggingConfig */ - HadoopJob.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.HadoopJob) + LoggingConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.LoggingConfig) return object; - var message = new $root.google.cloud.dataproc.v1.HadoopJob(); - if (object.mainJarFileUri != null) - message.mainJarFileUri = String(object.mainJarFileUri); - if (object.mainClass != null) - message.mainClass = String(object.mainClass); - if (object.args) { - if (!Array.isArray(object.args)) - throw TypeError(".google.cloud.dataproc.v1.HadoopJob.args: array expected"); - message.args = []; - for (var i = 0; i < object.args.length; ++i) - message.args[i] = String(object.args[i]); - } - if (object.jarFileUris) { - if (!Array.isArray(object.jarFileUris)) - throw TypeError(".google.cloud.dataproc.v1.HadoopJob.jarFileUris: array expected"); - message.jarFileUris = []; - for (var i = 0; i < object.jarFileUris.length; ++i) - message.jarFileUris[i] = String(object.jarFileUris[i]); - } - if (object.fileUris) { - if (!Array.isArray(object.fileUris)) - throw TypeError(".google.cloud.dataproc.v1.HadoopJob.fileUris: array expected"); - message.fileUris = []; - for (var i = 0; i < object.fileUris.length; ++i) - message.fileUris[i] = String(object.fileUris[i]); - } - if (object.archiveUris) { - if (!Array.isArray(object.archiveUris)) - throw TypeError(".google.cloud.dataproc.v1.HadoopJob.archiveUris: array expected"); - message.archiveUris = []; - for (var i = 0; i < object.archiveUris.length; ++i) - message.archiveUris[i] = String(object.archiveUris[i]); - } - if (object.properties) { - if (typeof object.properties 
!== "object") - throw TypeError(".google.cloud.dataproc.v1.HadoopJob.properties: object expected"); - message.properties = {}; - for (var keys = Object.keys(object.properties), i = 0; i < keys.length; ++i) - message.properties[keys[i]] = String(object.properties[keys[i]]); - } - if (object.loggingConfig != null) { - if (typeof object.loggingConfig !== "object") - throw TypeError(".google.cloud.dataproc.v1.HadoopJob.loggingConfig: object expected"); - message.loggingConfig = $root.google.cloud.dataproc.v1.LoggingConfig.fromObject(object.loggingConfig); + var message = new $root.google.cloud.dataproc.v1.LoggingConfig(); + if (object.driverLogLevels) { + if (typeof object.driverLogLevels !== "object") + throw TypeError(".google.cloud.dataproc.v1.LoggingConfig.driverLogLevels: object expected"); + message.driverLogLevels = {}; + for (var keys = Object.keys(object.driverLogLevels), i = 0; i < keys.length; ++i) + switch (object.driverLogLevels[keys[i]]) { + default: + if (typeof object.driverLogLevels[keys[i]] === "number") { + message.driverLogLevels[keys[i]] = object.driverLogLevels[keys[i]]; + break; + } + break; + case "LEVEL_UNSPECIFIED": + case 0: + message.driverLogLevels[keys[i]] = 0; + break; + case "ALL": + case 1: + message.driverLogLevels[keys[i]] = 1; + break; + case "TRACE": + case 2: + message.driverLogLevels[keys[i]] = 2; + break; + case "DEBUG": + case 3: + message.driverLogLevels[keys[i]] = 3; + break; + case "INFO": + case 4: + message.driverLogLevels[keys[i]] = 4; + break; + case "WARN": + case 5: + message.driverLogLevels[keys[i]] = 5; + break; + case "ERROR": + case 6: + message.driverLogLevels[keys[i]] = 6; + break; + case "FATAL": + case 7: + message.driverLogLevels[keys[i]] = 7; + break; + case "OFF": + case 8: + message.driverLogLevels[keys[i]] = 8; + break; + } } return message; }; /** - * Creates a plain object from a HadoopJob message. Also converts values to other types if specified. + * Creates a plain object from a LoggingConfig message. 
Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.HadoopJob + * @memberof google.cloud.dataproc.v1.LoggingConfig * @static - * @param {google.cloud.dataproc.v1.HadoopJob} message HadoopJob + * @param {google.cloud.dataproc.v1.LoggingConfig} message LoggingConfig * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - HadoopJob.toObject = function toObject(message, options) { + LoggingConfig.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) { - object.args = []; - object.jarFileUris = []; - object.fileUris = []; - object.archiveUris = []; - } if (options.objects || options.defaults) - object.properties = {}; - if (options.defaults) - object.loggingConfig = null; - if (message.mainJarFileUri != null && message.hasOwnProperty("mainJarFileUri")) { - object.mainJarFileUri = message.mainJarFileUri; - if (options.oneofs) - object.driver = "mainJarFileUri"; - } - if (message.mainClass != null && message.hasOwnProperty("mainClass")) { - object.mainClass = message.mainClass; - if (options.oneofs) - object.driver = "mainClass"; - } - if (message.args && message.args.length) { - object.args = []; - for (var j = 0; j < message.args.length; ++j) - object.args[j] = message.args[j]; - } - if (message.jarFileUris && message.jarFileUris.length) { - object.jarFileUris = []; - for (var j = 0; j < message.jarFileUris.length; ++j) - object.jarFileUris[j] = message.jarFileUris[j]; - } - if (message.fileUris && message.fileUris.length) { - object.fileUris = []; - for (var j = 0; j < message.fileUris.length; ++j) - object.fileUris[j] = message.fileUris[j]; - } - if (message.archiveUris && message.archiveUris.length) { - object.archiveUris = []; - for (var j = 0; j < message.archiveUris.length; ++j) - object.archiveUris[j] = message.archiveUris[j]; - } + object.driverLogLevels = {}; var keys2; - 
if (message.properties && (keys2 = Object.keys(message.properties)).length) { - object.properties = {}; + if (message.driverLogLevels && (keys2 = Object.keys(message.driverLogLevels)).length) { + object.driverLogLevels = {}; for (var j = 0; j < keys2.length; ++j) - object.properties[keys2[j]] = message.properties[keys2[j]]; + object.driverLogLevels[keys2[j]] = options.enums === String ? $root.google.cloud.dataproc.v1.LoggingConfig.Level[message.driverLogLevels[keys2[j]]] === undefined ? message.driverLogLevels[keys2[j]] : $root.google.cloud.dataproc.v1.LoggingConfig.Level[message.driverLogLevels[keys2[j]]] : message.driverLogLevels[keys2[j]]; } - if (message.loggingConfig != null && message.hasOwnProperty("loggingConfig")) - object.loggingConfig = $root.google.cloud.dataproc.v1.LoggingConfig.toObject(message.loggingConfig, options); return object; }; /** - * Converts this HadoopJob to JSON. + * Converts this LoggingConfig to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.HadoopJob + * @memberof google.cloud.dataproc.v1.LoggingConfig * @instance * @returns {Object.} JSON object */ - HadoopJob.prototype.toJSON = function toJSON() { + LoggingConfig.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for HadoopJob + * Gets the default type url for LoggingConfig * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.HadoopJob + * @memberof google.cloud.dataproc.v1.LoggingConfig * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - HadoopJob.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LoggingConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.HadoopJob"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.LoggingConfig"; }; - 
return HadoopJob; - })(); - - v1.SparkJob = (function() { - /** - * Properties of a SparkJob. - * @memberof google.cloud.dataproc.v1 - * @interface ISparkJob - * @property {string|null} [mainJarFileUri] SparkJob mainJarFileUri - * @property {string|null} [mainClass] SparkJob mainClass - * @property {Array.|null} [args] SparkJob args - * @property {Array.|null} [jarFileUris] SparkJob jarFileUris - * @property {Array.|null} [fileUris] SparkJob fileUris - * @property {Array.|null} [archiveUris] SparkJob archiveUris - * @property {Object.|null} [properties] SparkJob properties - * @property {google.cloud.dataproc.v1.ILoggingConfig|null} [loggingConfig] SparkJob loggingConfig - */ - + * Level enum. + * @name google.cloud.dataproc.v1.LoggingConfig.Level + * @enum {number} + * @property {number} LEVEL_UNSPECIFIED=0 LEVEL_UNSPECIFIED value + * @property {number} ALL=1 ALL value + * @property {number} TRACE=2 TRACE value + * @property {number} DEBUG=3 DEBUG value + * @property {number} INFO=4 INFO value + * @property {number} WARN=5 WARN value + * @property {number} ERROR=6 ERROR value + * @property {number} FATAL=7 FATAL value + * @property {number} OFF=8 OFF value + */ + LoggingConfig.Level = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "LEVEL_UNSPECIFIED"] = 0; + values[valuesById[1] = "ALL"] = 1; + values[valuesById[2] = "TRACE"] = 2; + values[valuesById[3] = "DEBUG"] = 3; + values[valuesById[4] = "INFO"] = 4; + values[valuesById[5] = "WARN"] = 5; + values[valuesById[6] = "ERROR"] = 6; + values[valuesById[7] = "FATAL"] = 7; + values[valuesById[8] = "OFF"] = 8; + return values; + })(); + + return LoggingConfig; + })(); + + v1.HadoopJob = (function() { + /** - * Constructs a new SparkJob. + * Properties of a HadoopJob. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a SparkJob. 
- * @implements ISparkJob + * @interface IHadoopJob + * @property {string|null} [mainJarFileUri] HadoopJob mainJarFileUri + * @property {string|null} [mainClass] HadoopJob mainClass + * @property {Array.|null} [args] HadoopJob args + * @property {Array.|null} [jarFileUris] HadoopJob jarFileUris + * @property {Array.|null} [fileUris] HadoopJob fileUris + * @property {Array.|null} [archiveUris] HadoopJob archiveUris + * @property {Object.|null} [properties] HadoopJob properties + * @property {google.cloud.dataproc.v1.ILoggingConfig|null} [loggingConfig] HadoopJob loggingConfig + */ + + /** + * Constructs a new HadoopJob. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a HadoopJob. + * @implements IHadoopJob * @constructor - * @param {google.cloud.dataproc.v1.ISparkJob=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IHadoopJob=} [properties] Properties to set */ - function SparkJob(properties) { + function HadoopJob(properties) { this.args = []; this.jarFileUris = []; this.fileUris = []; @@ -23233,105 +23386,105 @@ } /** - * SparkJob mainJarFileUri. + * HadoopJob mainJarFileUri. * @member {string|null|undefined} mainJarFileUri - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @instance */ - SparkJob.prototype.mainJarFileUri = null; + HadoopJob.prototype.mainJarFileUri = null; /** - * SparkJob mainClass. + * HadoopJob mainClass. * @member {string|null|undefined} mainClass - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @instance */ - SparkJob.prototype.mainClass = null; + HadoopJob.prototype.mainClass = null; /** - * SparkJob args. + * HadoopJob args. * @member {Array.} args - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @instance */ - SparkJob.prototype.args = $util.emptyArray; + HadoopJob.prototype.args = $util.emptyArray; /** - * SparkJob jarFileUris. + * HadoopJob jarFileUris. 
* @member {Array.} jarFileUris - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @instance */ - SparkJob.prototype.jarFileUris = $util.emptyArray; + HadoopJob.prototype.jarFileUris = $util.emptyArray; /** - * SparkJob fileUris. + * HadoopJob fileUris. * @member {Array.} fileUris - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @instance */ - SparkJob.prototype.fileUris = $util.emptyArray; + HadoopJob.prototype.fileUris = $util.emptyArray; /** - * SparkJob archiveUris. + * HadoopJob archiveUris. * @member {Array.} archiveUris - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @instance */ - SparkJob.prototype.archiveUris = $util.emptyArray; + HadoopJob.prototype.archiveUris = $util.emptyArray; /** - * SparkJob properties. + * HadoopJob properties. * @member {Object.} properties - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @instance */ - SparkJob.prototype.properties = $util.emptyObject; + HadoopJob.prototype.properties = $util.emptyObject; /** - * SparkJob loggingConfig. + * HadoopJob loggingConfig. * @member {google.cloud.dataproc.v1.ILoggingConfig|null|undefined} loggingConfig - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @instance */ - SparkJob.prototype.loggingConfig = null; + HadoopJob.prototype.loggingConfig = null; // OneOf field names bound to virtual getters and setters var $oneOfFields; /** - * SparkJob driver. + * HadoopJob driver. 
* @member {"mainJarFileUri"|"mainClass"|undefined} driver - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @instance */ - Object.defineProperty(SparkJob.prototype, "driver", { + Object.defineProperty(HadoopJob.prototype, "driver", { get: $util.oneOfGetter($oneOfFields = ["mainJarFileUri", "mainClass"]), set: $util.oneOfSetter($oneOfFields) }); /** - * Creates a new SparkJob instance using the specified properties. + * Creates a new HadoopJob instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @static - * @param {google.cloud.dataproc.v1.ISparkJob=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.SparkJob} SparkJob instance + * @param {google.cloud.dataproc.v1.IHadoopJob=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.HadoopJob} HadoopJob instance */ - SparkJob.create = function create(properties) { - return new SparkJob(properties); + HadoopJob.create = function create(properties) { + return new HadoopJob(properties); }; /** - * Encodes the specified SparkJob message. Does not implicitly {@link google.cloud.dataproc.v1.SparkJob.verify|verify} messages. + * Encodes the specified HadoopJob message. Does not implicitly {@link google.cloud.dataproc.v1.HadoopJob.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @static - * @param {google.cloud.dataproc.v1.ISparkJob} message SparkJob message or plain object to encode + * @param {google.cloud.dataproc.v1.IHadoopJob} message HadoopJob message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SparkJob.encode = function encode(message, writer) { + HadoopJob.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.mainJarFileUri != null && Object.hasOwnProperty.call(message, "mainJarFileUri")) @@ -23359,33 +23512,33 @@ }; /** - * Encodes the specified SparkJob message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.SparkJob.verify|verify} messages. + * Encodes the specified HadoopJob message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.HadoopJob.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @static - * @param {google.cloud.dataproc.v1.ISparkJob} message SparkJob message or plain object to encode + * @param {google.cloud.dataproc.v1.IHadoopJob} message HadoopJob message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SparkJob.encodeDelimited = function encodeDelimited(message, writer) { + HadoopJob.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SparkJob message from the specified reader or buffer. + * Decodes a HadoopJob message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.SparkJob} SparkJob + * @returns {google.cloud.dataproc.v1.HadoopJob} HadoopJob * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SparkJob.decode = function decode(reader, length) { + HadoopJob.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.SparkJob(), key, value; + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.HadoopJob(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { @@ -23457,30 +23610,30 @@ }; /** - * Decodes a SparkJob message from the specified reader or buffer, length delimited. + * Decodes a HadoopJob message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.SparkJob} SparkJob + * @returns {google.cloud.dataproc.v1.HadoopJob} HadoopJob * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SparkJob.decodeDelimited = function decodeDelimited(reader) { + HadoopJob.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SparkJob message. + * Verifies a HadoopJob message. * @function verify - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SparkJob.verify = function verify(message) { + HadoopJob.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; var properties = {}; @@ -23541,74 +23694,74 @@ }; /** - * Creates a SparkJob message from a plain object. Also converts values to their respective internal types. + * Creates a HadoopJob message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.SparkJob} SparkJob + * @returns {google.cloud.dataproc.v1.HadoopJob} HadoopJob */ - SparkJob.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.SparkJob) + HadoopJob.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.HadoopJob) return object; - var message = new $root.google.cloud.dataproc.v1.SparkJob(); + var message = new $root.google.cloud.dataproc.v1.HadoopJob(); if (object.mainJarFileUri != null) message.mainJarFileUri = String(object.mainJarFileUri); if (object.mainClass != null) message.mainClass = String(object.mainClass); if (object.args) { if (!Array.isArray(object.args)) - throw TypeError(".google.cloud.dataproc.v1.SparkJob.args: array expected"); + throw TypeError(".google.cloud.dataproc.v1.HadoopJob.args: array expected"); message.args = []; for (var i = 0; i < object.args.length; ++i) message.args[i] = String(object.args[i]); } if (object.jarFileUris) { if (!Array.isArray(object.jarFileUris)) - throw TypeError(".google.cloud.dataproc.v1.SparkJob.jarFileUris: array expected"); + throw TypeError(".google.cloud.dataproc.v1.HadoopJob.jarFileUris: array expected"); message.jarFileUris = []; for (var i = 0; i < object.jarFileUris.length; ++i) message.jarFileUris[i] = String(object.jarFileUris[i]); } if (object.fileUris) { if (!Array.isArray(object.fileUris)) - throw TypeError(".google.cloud.dataproc.v1.SparkJob.fileUris: array expected"); + throw TypeError(".google.cloud.dataproc.v1.HadoopJob.fileUris: array expected"); message.fileUris = []; for (var i = 0; i < object.fileUris.length; ++i) message.fileUris[i] = String(object.fileUris[i]); } if (object.archiveUris) { if (!Array.isArray(object.archiveUris)) - throw 
TypeError(".google.cloud.dataproc.v1.SparkJob.archiveUris: array expected"); + throw TypeError(".google.cloud.dataproc.v1.HadoopJob.archiveUris: array expected"); message.archiveUris = []; for (var i = 0; i < object.archiveUris.length; ++i) message.archiveUris[i] = String(object.archiveUris[i]); } if (object.properties) { if (typeof object.properties !== "object") - throw TypeError(".google.cloud.dataproc.v1.SparkJob.properties: object expected"); + throw TypeError(".google.cloud.dataproc.v1.HadoopJob.properties: object expected"); message.properties = {}; for (var keys = Object.keys(object.properties), i = 0; i < keys.length; ++i) message.properties[keys[i]] = String(object.properties[keys[i]]); } if (object.loggingConfig != null) { if (typeof object.loggingConfig !== "object") - throw TypeError(".google.cloud.dataproc.v1.SparkJob.loggingConfig: object expected"); + throw TypeError(".google.cloud.dataproc.v1.HadoopJob.loggingConfig: object expected"); message.loggingConfig = $root.google.cloud.dataproc.v1.LoggingConfig.fromObject(object.loggingConfig); } return message; }; /** - * Creates a plain object from a SparkJob message. Also converts values to other types if specified. + * Creates a plain object from a HadoopJob message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @static - * @param {google.cloud.dataproc.v1.SparkJob} message SparkJob + * @param {google.cloud.dataproc.v1.HadoopJob} message HadoopJob * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SparkJob.toObject = function toObject(message, options) { + HadoopJob.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; @@ -23664,61 +23817,60 @@ }; /** - * Converts this SparkJob to JSON. + * Converts this HadoopJob to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @instance * @returns {Object.} JSON object */ - SparkJob.prototype.toJSON = function toJSON() { + HadoopJob.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SparkJob + * Gets the default type url for HadoopJob * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.SparkJob + * @memberof google.cloud.dataproc.v1.HadoopJob * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SparkJob.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + HadoopJob.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.SparkJob"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.HadoopJob"; }; - return SparkJob; + return HadoopJob; })(); - v1.PySparkJob = (function() { + v1.SparkJob = (function() { /** - * Properties of a PySparkJob. + * Properties of a SparkJob. 
* @memberof google.cloud.dataproc.v1 - * @interface IPySparkJob - * @property {string|null} [mainPythonFileUri] PySparkJob mainPythonFileUri - * @property {Array.|null} [args] PySparkJob args - * @property {Array.|null} [pythonFileUris] PySparkJob pythonFileUris - * @property {Array.|null} [jarFileUris] PySparkJob jarFileUris - * @property {Array.|null} [fileUris] PySparkJob fileUris - * @property {Array.|null} [archiveUris] PySparkJob archiveUris - * @property {Object.|null} [properties] PySparkJob properties - * @property {google.cloud.dataproc.v1.ILoggingConfig|null} [loggingConfig] PySparkJob loggingConfig + * @interface ISparkJob + * @property {string|null} [mainJarFileUri] SparkJob mainJarFileUri + * @property {string|null} [mainClass] SparkJob mainClass + * @property {Array.|null} [args] SparkJob args + * @property {Array.|null} [jarFileUris] SparkJob jarFileUris + * @property {Array.|null} [fileUris] SparkJob fileUris + * @property {Array.|null} [archiveUris] SparkJob archiveUris + * @property {Object.|null} [properties] SparkJob properties + * @property {google.cloud.dataproc.v1.ILoggingConfig|null} [loggingConfig] SparkJob loggingConfig */ /** - * Constructs a new PySparkJob. + * Constructs a new SparkJob. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a PySparkJob. - * @implements IPySparkJob + * @classdesc Represents a SparkJob. + * @implements ISparkJob * @constructor - * @param {google.cloud.dataproc.v1.IPySparkJob=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.ISparkJob=} [properties] Properties to set */ - function PySparkJob(properties) { + function SparkJob(properties) { this.args = []; - this.pythonFileUris = []; this.jarFileUris = []; this.fileUris = []; this.archiveUris = []; @@ -23730,101 +23882,114 @@ } /** - * PySparkJob mainPythonFileUri. - * @member {string} mainPythonFileUri - * @memberof google.cloud.dataproc.v1.PySparkJob + * SparkJob mainJarFileUri. 
+ * @member {string|null|undefined} mainJarFileUri + * @memberof google.cloud.dataproc.v1.SparkJob * @instance */ - PySparkJob.prototype.mainPythonFileUri = ""; + SparkJob.prototype.mainJarFileUri = null; /** - * PySparkJob args. - * @member {Array.} args - * @memberof google.cloud.dataproc.v1.PySparkJob - * @instance - */ - PySparkJob.prototype.args = $util.emptyArray; + * SparkJob mainClass. + * @member {string|null|undefined} mainClass + * @memberof google.cloud.dataproc.v1.SparkJob + * @instance + */ + SparkJob.prototype.mainClass = null; /** - * PySparkJob pythonFileUris. - * @member {Array.} pythonFileUris - * @memberof google.cloud.dataproc.v1.PySparkJob + * SparkJob args. + * @member {Array.} args + * @memberof google.cloud.dataproc.v1.SparkJob * @instance */ - PySparkJob.prototype.pythonFileUris = $util.emptyArray; + SparkJob.prototype.args = $util.emptyArray; /** - * PySparkJob jarFileUris. + * SparkJob jarFileUris. * @member {Array.} jarFileUris - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @instance */ - PySparkJob.prototype.jarFileUris = $util.emptyArray; + SparkJob.prototype.jarFileUris = $util.emptyArray; /** - * PySparkJob fileUris. + * SparkJob fileUris. * @member {Array.} fileUris - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @instance */ - PySparkJob.prototype.fileUris = $util.emptyArray; + SparkJob.prototype.fileUris = $util.emptyArray; /** - * PySparkJob archiveUris. + * SparkJob archiveUris. * @member {Array.} archiveUris - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @instance */ - PySparkJob.prototype.archiveUris = $util.emptyArray; + SparkJob.prototype.archiveUris = $util.emptyArray; /** - * PySparkJob properties. + * SparkJob properties. 
* @member {Object.} properties - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @instance */ - PySparkJob.prototype.properties = $util.emptyObject; + SparkJob.prototype.properties = $util.emptyObject; /** - * PySparkJob loggingConfig. + * SparkJob loggingConfig. * @member {google.cloud.dataproc.v1.ILoggingConfig|null|undefined} loggingConfig - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @instance */ - PySparkJob.prototype.loggingConfig = null; + SparkJob.prototype.loggingConfig = null; + + // OneOf field names bound to virtual getters and setters + var $oneOfFields; /** - * Creates a new PySparkJob instance using the specified properties. + * SparkJob driver. + * @member {"mainJarFileUri"|"mainClass"|undefined} driver + * @memberof google.cloud.dataproc.v1.SparkJob + * @instance + */ + Object.defineProperty(SparkJob.prototype, "driver", { + get: $util.oneOfGetter($oneOfFields = ["mainJarFileUri", "mainClass"]), + set: $util.oneOfSetter($oneOfFields) + }); + + /** + * Creates a new SparkJob instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @static - * @param {google.cloud.dataproc.v1.IPySparkJob=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.PySparkJob} PySparkJob instance + * @param {google.cloud.dataproc.v1.ISparkJob=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.SparkJob} SparkJob instance */ - PySparkJob.create = function create(properties) { - return new PySparkJob(properties); + SparkJob.create = function create(properties) { + return new SparkJob(properties); }; /** - * Encodes the specified PySparkJob message. Does not implicitly {@link google.cloud.dataproc.v1.PySparkJob.verify|verify} messages. + * Encodes the specified SparkJob message. 
Does not implicitly {@link google.cloud.dataproc.v1.SparkJob.verify|verify} messages. * @function encode - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @static - * @param {google.cloud.dataproc.v1.IPySparkJob} message PySparkJob message or plain object to encode + * @param {google.cloud.dataproc.v1.ISparkJob} message SparkJob message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PySparkJob.encode = function encode(message, writer) { + SparkJob.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.mainPythonFileUri != null && Object.hasOwnProperty.call(message, "mainPythonFileUri")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.mainPythonFileUri); + if (message.mainJarFileUri != null && Object.hasOwnProperty.call(message, "mainJarFileUri")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.mainJarFileUri); + if (message.mainClass != null && Object.hasOwnProperty.call(message, "mainClass")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.mainClass); if (message.args != null && message.args.length) for (var i = 0; i < message.args.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.args[i]); - if (message.pythonFileUris != null && message.pythonFileUris.length) - for (var i = 0; i < message.pythonFileUris.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.pythonFileUris[i]); + writer.uint32(/* id 3, wireType 2 =*/26).string(message.args[i]); if (message.jarFileUris != null && message.jarFileUris.length) for (var i = 0; i < message.jarFileUris.length; ++i) writer.uint32(/* id 4, wireType 2 =*/34).string(message.jarFileUris[i]); @@ -23843,50 +24008,48 @@ }; /** - * Encodes the specified PySparkJob message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.PySparkJob.verify|verify} messages. 
+ * Encodes the specified SparkJob message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.SparkJob.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @static - * @param {google.cloud.dataproc.v1.IPySparkJob} message PySparkJob message or plain object to encode + * @param {google.cloud.dataproc.v1.ISparkJob} message SparkJob message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PySparkJob.encodeDelimited = function encodeDelimited(message, writer) { + SparkJob.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a PySparkJob message from the specified reader or buffer. + * Decodes a SparkJob message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.PySparkJob} PySparkJob + * @returns {google.cloud.dataproc.v1.SparkJob} SparkJob * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PySparkJob.decode = function decode(reader, length) { + SparkJob.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.PySparkJob(), key, value; + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.SparkJob(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.mainPythonFileUri = reader.string(); + message.mainJarFileUri = reader.string(); break; } case 2: { - if (!(message.args && message.args.length)) - message.args = []; - message.args.push(reader.string()); + message.mainClass = reader.string(); break; } case 3: { - if (!(message.pythonFileUris && message.pythonFileUris.length)) - message.pythonFileUris = []; - message.pythonFileUris.push(reader.string()); + if (!(message.args && message.args.length)) + message.args = []; + message.args.push(reader.string()); break; } case 4: { @@ -23943,35 +24106,45 @@ }; /** - * Decodes a PySparkJob message from the specified reader or buffer, length delimited. + * Decodes a SparkJob message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.PySparkJob} PySparkJob + * @returns {google.cloud.dataproc.v1.SparkJob} SparkJob * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PySparkJob.decodeDelimited = function decodeDelimited(reader) { + SparkJob.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a PySparkJob message. + * Verifies a SparkJob message. 
* @function verify - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - PySparkJob.verify = function verify(message) { + SparkJob.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.mainPythonFileUri != null && message.hasOwnProperty("mainPythonFileUri")) - if (!$util.isString(message.mainPythonFileUri)) - return "mainPythonFileUri: string expected"; + var properties = {}; + if (message.mainJarFileUri != null && message.hasOwnProperty("mainJarFileUri")) { + properties.driver = 1; + if (!$util.isString(message.mainJarFileUri)) + return "mainJarFileUri: string expected"; + } + if (message.mainClass != null && message.hasOwnProperty("mainClass")) { + if (properties.driver === 1) + return "driver: multiple values"; + properties.driver = 1; + if (!$util.isString(message.mainClass)) + return "mainClass: string expected"; + } if (message.args != null && message.hasOwnProperty("args")) { if (!Array.isArray(message.args)) return "args: array expected"; @@ -23979,13 +24152,6 @@ if (!$util.isString(message.args[i])) return "args: string[] expected"; } - if (message.pythonFileUris != null && message.hasOwnProperty("pythonFileUris")) { - if (!Array.isArray(message.pythonFileUris)) - return "pythonFileUris: array expected"; - for (var i = 0; i < message.pythonFileUris.length; ++i) - if (!$util.isString(message.pythonFileUris[i])) - return "pythonFileUris: string[] expected"; - } if (message.jarFileUris != null && message.hasOwnProperty("jarFileUris")) { if (!Array.isArray(message.jarFileUris)) return "jarFileUris: array expected"; @@ -24024,107 +24190,102 @@ }; /** - * Creates a PySparkJob message from a plain object. Also converts values to their respective internal types. 
+ * Creates a SparkJob message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.PySparkJob} PySparkJob + * @returns {google.cloud.dataproc.v1.SparkJob} SparkJob */ - PySparkJob.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.PySparkJob) + SparkJob.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.SparkJob) return object; - var message = new $root.google.cloud.dataproc.v1.PySparkJob(); - if (object.mainPythonFileUri != null) - message.mainPythonFileUri = String(object.mainPythonFileUri); + var message = new $root.google.cloud.dataproc.v1.SparkJob(); + if (object.mainJarFileUri != null) + message.mainJarFileUri = String(object.mainJarFileUri); + if (object.mainClass != null) + message.mainClass = String(object.mainClass); if (object.args) { if (!Array.isArray(object.args)) - throw TypeError(".google.cloud.dataproc.v1.PySparkJob.args: array expected"); + throw TypeError(".google.cloud.dataproc.v1.SparkJob.args: array expected"); message.args = []; for (var i = 0; i < object.args.length; ++i) message.args[i] = String(object.args[i]); } - if (object.pythonFileUris) { - if (!Array.isArray(object.pythonFileUris)) - throw TypeError(".google.cloud.dataproc.v1.PySparkJob.pythonFileUris: array expected"); - message.pythonFileUris = []; - for (var i = 0; i < object.pythonFileUris.length; ++i) - message.pythonFileUris[i] = String(object.pythonFileUris[i]); - } if (object.jarFileUris) { if (!Array.isArray(object.jarFileUris)) - throw TypeError(".google.cloud.dataproc.v1.PySparkJob.jarFileUris: array expected"); + throw TypeError(".google.cloud.dataproc.v1.SparkJob.jarFileUris: array expected"); message.jarFileUris = []; for (var i = 0; i < 
object.jarFileUris.length; ++i) message.jarFileUris[i] = String(object.jarFileUris[i]); } if (object.fileUris) { if (!Array.isArray(object.fileUris)) - throw TypeError(".google.cloud.dataproc.v1.PySparkJob.fileUris: array expected"); + throw TypeError(".google.cloud.dataproc.v1.SparkJob.fileUris: array expected"); message.fileUris = []; for (var i = 0; i < object.fileUris.length; ++i) message.fileUris[i] = String(object.fileUris[i]); } if (object.archiveUris) { if (!Array.isArray(object.archiveUris)) - throw TypeError(".google.cloud.dataproc.v1.PySparkJob.archiveUris: array expected"); + throw TypeError(".google.cloud.dataproc.v1.SparkJob.archiveUris: array expected"); message.archiveUris = []; for (var i = 0; i < object.archiveUris.length; ++i) message.archiveUris[i] = String(object.archiveUris[i]); } if (object.properties) { if (typeof object.properties !== "object") - throw TypeError(".google.cloud.dataproc.v1.PySparkJob.properties: object expected"); + throw TypeError(".google.cloud.dataproc.v1.SparkJob.properties: object expected"); message.properties = {}; for (var keys = Object.keys(object.properties), i = 0; i < keys.length; ++i) message.properties[keys[i]] = String(object.properties[keys[i]]); } if (object.loggingConfig != null) { if (typeof object.loggingConfig !== "object") - throw TypeError(".google.cloud.dataproc.v1.PySparkJob.loggingConfig: object expected"); + throw TypeError(".google.cloud.dataproc.v1.SparkJob.loggingConfig: object expected"); message.loggingConfig = $root.google.cloud.dataproc.v1.LoggingConfig.fromObject(object.loggingConfig); } return message; }; /** - * Creates a plain object from a PySparkJob message. Also converts values to other types if specified. + * Creates a plain object from a SparkJob message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @static - * @param {google.cloud.dataproc.v1.PySparkJob} message PySparkJob + * @param {google.cloud.dataproc.v1.SparkJob} message SparkJob * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - PySparkJob.toObject = function toObject(message, options) { + SparkJob.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.arrays || options.defaults) { object.args = []; - object.pythonFileUris = []; object.jarFileUris = []; object.fileUris = []; object.archiveUris = []; } if (options.objects || options.defaults) object.properties = {}; - if (options.defaults) { - object.mainPythonFileUri = ""; + if (options.defaults) object.loggingConfig = null; + if (message.mainJarFileUri != null && message.hasOwnProperty("mainJarFileUri")) { + object.mainJarFileUri = message.mainJarFileUri; + if (options.oneofs) + object.driver = "mainJarFileUri"; + } + if (message.mainClass != null && message.hasOwnProperty("mainClass")) { + object.mainClass = message.mainClass; + if (options.oneofs) + object.driver = "mainClass"; } - if (message.mainPythonFileUri != null && message.hasOwnProperty("mainPythonFileUri")) - object.mainPythonFileUri = message.mainPythonFileUri; if (message.args && message.args.length) { object.args = []; for (var j = 0; j < message.args.length; ++j) object.args[j] = message.args[j]; } - if (message.pythonFileUris && message.pythonFileUris.length) { - object.pythonFileUris = []; - for (var j = 0; j < message.pythonFileUris.length; ++j) - object.pythonFileUris[j] = message.pythonFileUris[j]; - } if (message.jarFileUris && message.jarFileUris.length) { object.jarFileUris = []; for (var j = 0; j < message.jarFileUris.length; ++j) @@ -24152,53 +24313,65 @@ }; /** - * Converts this PySparkJob to JSON. + * Converts this SparkJob to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @instance * @returns {Object.} JSON object */ - PySparkJob.prototype.toJSON = function toJSON() { + SparkJob.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for PySparkJob + * Gets the default type url for SparkJob * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.PySparkJob + * @memberof google.cloud.dataproc.v1.SparkJob * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - PySparkJob.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SparkJob.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.PySparkJob"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.SparkJob"; }; - return PySparkJob; + return SparkJob; })(); - v1.QueryList = (function() { + v1.PySparkJob = (function() { /** - * Properties of a QueryList. + * Properties of a PySparkJob. * @memberof google.cloud.dataproc.v1 - * @interface IQueryList - * @property {Array.|null} [queries] QueryList queries + * @interface IPySparkJob + * @property {string|null} [mainPythonFileUri] PySparkJob mainPythonFileUri + * @property {Array.|null} [args] PySparkJob args + * @property {Array.|null} [pythonFileUris] PySparkJob pythonFileUris + * @property {Array.|null} [jarFileUris] PySparkJob jarFileUris + * @property {Array.|null} [fileUris] PySparkJob fileUris + * @property {Array.|null} [archiveUris] PySparkJob archiveUris + * @property {Object.|null} [properties] PySparkJob properties + * @property {google.cloud.dataproc.v1.ILoggingConfig|null} [loggingConfig] PySparkJob loggingConfig */ /** - * Constructs a new QueryList. + * Constructs a new PySparkJob. 
* @memberof google.cloud.dataproc.v1 - * @classdesc Represents a QueryList. - * @implements IQueryList + * @classdesc Represents a PySparkJob. + * @implements IPySparkJob * @constructor - * @param {google.cloud.dataproc.v1.IQueryList=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IPySparkJob=} [properties] Properties to set */ - function QueryList(properties) { - this.queries = []; + function PySparkJob(properties) { + this.args = []; + this.pythonFileUris = []; + this.jarFileUris = []; + this.fileUris = []; + this.archiveUris = []; + this.properties = {}; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -24206,78 +24379,208 @@ } /** - * QueryList queries. - * @member {Array.} queries - * @memberof google.cloud.dataproc.v1.QueryList + * PySparkJob mainPythonFileUri. + * @member {string} mainPythonFileUri + * @memberof google.cloud.dataproc.v1.PySparkJob * @instance */ - QueryList.prototype.queries = $util.emptyArray; + PySparkJob.prototype.mainPythonFileUri = ""; /** - * Creates a new QueryList instance using the specified properties. + * PySparkJob args. + * @member {Array.} args + * @memberof google.cloud.dataproc.v1.PySparkJob + * @instance + */ + PySparkJob.prototype.args = $util.emptyArray; + + /** + * PySparkJob pythonFileUris. + * @member {Array.} pythonFileUris + * @memberof google.cloud.dataproc.v1.PySparkJob + * @instance + */ + PySparkJob.prototype.pythonFileUris = $util.emptyArray; + + /** + * PySparkJob jarFileUris. + * @member {Array.} jarFileUris + * @memberof google.cloud.dataproc.v1.PySparkJob + * @instance + */ + PySparkJob.prototype.jarFileUris = $util.emptyArray; + + /** + * PySparkJob fileUris. + * @member {Array.} fileUris + * @memberof google.cloud.dataproc.v1.PySparkJob + * @instance + */ + PySparkJob.prototype.fileUris = $util.emptyArray; + + /** + * PySparkJob archiveUris. 
+ * @member {Array.} archiveUris + * @memberof google.cloud.dataproc.v1.PySparkJob + * @instance + */ + PySparkJob.prototype.archiveUris = $util.emptyArray; + + /** + * PySparkJob properties. + * @member {Object.} properties + * @memberof google.cloud.dataproc.v1.PySparkJob + * @instance + */ + PySparkJob.prototype.properties = $util.emptyObject; + + /** + * PySparkJob loggingConfig. + * @member {google.cloud.dataproc.v1.ILoggingConfig|null|undefined} loggingConfig + * @memberof google.cloud.dataproc.v1.PySparkJob + * @instance + */ + PySparkJob.prototype.loggingConfig = null; + + /** + * Creates a new PySparkJob instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.QueryList + * @memberof google.cloud.dataproc.v1.PySparkJob * @static - * @param {google.cloud.dataproc.v1.IQueryList=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.QueryList} QueryList instance + * @param {google.cloud.dataproc.v1.IPySparkJob=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.PySparkJob} PySparkJob instance */ - QueryList.create = function create(properties) { - return new QueryList(properties); + PySparkJob.create = function create(properties) { + return new PySparkJob(properties); }; /** - * Encodes the specified QueryList message. Does not implicitly {@link google.cloud.dataproc.v1.QueryList.verify|verify} messages. + * Encodes the specified PySparkJob message. Does not implicitly {@link google.cloud.dataproc.v1.PySparkJob.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.QueryList + * @memberof google.cloud.dataproc.v1.PySparkJob * @static - * @param {google.cloud.dataproc.v1.IQueryList} message QueryList message or plain object to encode + * @param {google.cloud.dataproc.v1.IPySparkJob} message PySparkJob message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - QueryList.encode = function encode(message, writer) { + PySparkJob.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.queries != null && message.queries.length) - for (var i = 0; i < message.queries.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.queries[i]); + if (message.mainPythonFileUri != null && Object.hasOwnProperty.call(message, "mainPythonFileUri")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.mainPythonFileUri); + if (message.args != null && message.args.length) + for (var i = 0; i < message.args.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.args[i]); + if (message.pythonFileUris != null && message.pythonFileUris.length) + for (var i = 0; i < message.pythonFileUris.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.pythonFileUris[i]); + if (message.jarFileUris != null && message.jarFileUris.length) + for (var i = 0; i < message.jarFileUris.length; ++i) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.jarFileUris[i]); + if (message.fileUris != null && message.fileUris.length) + for (var i = 0; i < message.fileUris.length; ++i) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.fileUris[i]); + if (message.archiveUris != null && message.archiveUris.length) + for (var i = 0; i < message.archiveUris.length; ++i) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.archiveUris[i]); + if (message.properties != null && Object.hasOwnProperty.call(message, "properties")) + for (var keys = 
Object.keys(message.properties), i = 0; i < keys.length; ++i) + writer.uint32(/* id 7, wireType 2 =*/58).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.properties[keys[i]]).ldelim(); + if (message.loggingConfig != null && Object.hasOwnProperty.call(message, "loggingConfig")) + $root.google.cloud.dataproc.v1.LoggingConfig.encode(message.loggingConfig, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); return writer; }; /** - * Encodes the specified QueryList message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.QueryList.verify|verify} messages. + * Encodes the specified PySparkJob message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.PySparkJob.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.QueryList + * @memberof google.cloud.dataproc.v1.PySparkJob * @static - * @param {google.cloud.dataproc.v1.IQueryList} message QueryList message or plain object to encode + * @param {google.cloud.dataproc.v1.IPySparkJob} message PySparkJob message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - QueryList.encodeDelimited = function encodeDelimited(message, writer) { + PySparkJob.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a QueryList message from the specified reader or buffer. + * Decodes a PySparkJob message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.QueryList + * @memberof google.cloud.dataproc.v1.PySparkJob * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.QueryList} QueryList + * @returns {google.cloud.dataproc.v1.PySparkJob} PySparkJob * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - QueryList.decode = function decode(reader, length) { + PySparkJob.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.QueryList(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.PySparkJob(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.queries && message.queries.length)) - message.queries = []; - message.queries.push(reader.string()); + message.mainPythonFileUri = reader.string(); + break; + } + case 2: { + if (!(message.args && message.args.length)) + message.args = []; + message.args.push(reader.string()); + break; + } + case 3: { + if (!(message.pythonFileUris && message.pythonFileUris.length)) + message.pythonFileUris = []; + message.pythonFileUris.push(reader.string()); + break; + } + case 4: { + if (!(message.jarFileUris && message.jarFileUris.length)) + message.jarFileUris = []; + message.jarFileUris.push(reader.string()); + break; + } + case 5: { + if (!(message.fileUris && message.fileUris.length)) + message.fileUris = []; + message.fileUris.push(reader.string()); + break; + } + case 6: { + if (!(message.archiveUris && message.archiveUris.length)) + message.archiveUris = []; + message.archiveUris.push(reader.string()); + break; + } 
+ case 7: { + if (message.properties === $util.emptyObject) + message.properties = {}; + var end2 = reader.uint32() + reader.pos; + key = ""; + value = ""; + while (reader.pos < end2) { + var tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.string(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.properties[key] = value; + break; + } + case 8: { + message.loggingConfig = $root.google.cloud.dataproc.v1.LoggingConfig.decode(reader, reader.uint32()); break; } default: @@ -24289,142 +24592,262 @@ }; /** - * Decodes a QueryList message from the specified reader or buffer, length delimited. + * Decodes a PySparkJob message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.QueryList + * @memberof google.cloud.dataproc.v1.PySparkJob * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.QueryList} QueryList + * @returns {google.cloud.dataproc.v1.PySparkJob} PySparkJob * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - QueryList.decodeDelimited = function decodeDelimited(reader) { + PySparkJob.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a QueryList message. + * Verifies a PySparkJob message. 
* @function verify - * @memberof google.cloud.dataproc.v1.QueryList + * @memberof google.cloud.dataproc.v1.PySparkJob * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - QueryList.verify = function verify(message) { + PySparkJob.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.queries != null && message.hasOwnProperty("queries")) { - if (!Array.isArray(message.queries)) - return "queries: array expected"; - for (var i = 0; i < message.queries.length; ++i) - if (!$util.isString(message.queries[i])) - return "queries: string[] expected"; - } - return null; - }; - - /** - * Creates a QueryList message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof google.cloud.dataproc.v1.QueryList - * @static - * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.QueryList} QueryList - */ - QueryList.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.QueryList) - return object; - var message = new $root.google.cloud.dataproc.v1.QueryList(); - if (object.queries) { - if (!Array.isArray(object.queries)) - throw TypeError(".google.cloud.dataproc.v1.QueryList.queries: array expected"); - message.queries = []; - for (var i = 0; i < object.queries.length; ++i) - message.queries[i] = String(object.queries[i]); + if (message.mainPythonFileUri != null && message.hasOwnProperty("mainPythonFileUri")) + if (!$util.isString(message.mainPythonFileUri)) + return "mainPythonFileUri: string expected"; + if (message.args != null && message.hasOwnProperty("args")) { + if (!Array.isArray(message.args)) + return "args: array expected"; + for (var i = 0; i < message.args.length; ++i) + if (!$util.isString(message.args[i])) + return "args: string[] expected"; } - return message; - }; - + if 
(message.pythonFileUris != null && message.hasOwnProperty("pythonFileUris")) { + if (!Array.isArray(message.pythonFileUris)) + return "pythonFileUris: array expected"; + for (var i = 0; i < message.pythonFileUris.length; ++i) + if (!$util.isString(message.pythonFileUris[i])) + return "pythonFileUris: string[] expected"; + } + if (message.jarFileUris != null && message.hasOwnProperty("jarFileUris")) { + if (!Array.isArray(message.jarFileUris)) + return "jarFileUris: array expected"; + for (var i = 0; i < message.jarFileUris.length; ++i) + if (!$util.isString(message.jarFileUris[i])) + return "jarFileUris: string[] expected"; + } + if (message.fileUris != null && message.hasOwnProperty("fileUris")) { + if (!Array.isArray(message.fileUris)) + return "fileUris: array expected"; + for (var i = 0; i < message.fileUris.length; ++i) + if (!$util.isString(message.fileUris[i])) + return "fileUris: string[] expected"; + } + if (message.archiveUris != null && message.hasOwnProperty("archiveUris")) { + if (!Array.isArray(message.archiveUris)) + return "archiveUris: array expected"; + for (var i = 0; i < message.archiveUris.length; ++i) + if (!$util.isString(message.archiveUris[i])) + return "archiveUris: string[] expected"; + } + if (message.properties != null && message.hasOwnProperty("properties")) { + if (!$util.isObject(message.properties)) + return "properties: object expected"; + var key = Object.keys(message.properties); + for (var i = 0; i < key.length; ++i) + if (!$util.isString(message.properties[key[i]])) + return "properties: string{k:string} expected"; + } + if (message.loggingConfig != null && message.hasOwnProperty("loggingConfig")) { + var error = $root.google.cloud.dataproc.v1.LoggingConfig.verify(message.loggingConfig); + if (error) + return "loggingConfig." + error; + } + return null; + }; + /** - * Creates a plain object from a QueryList message. Also converts values to other types if specified. + * Creates a PySparkJob message from a plain object. 
Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.dataproc.v1.PySparkJob + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.PySparkJob} PySparkJob + */ + PySparkJob.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.PySparkJob) + return object; + var message = new $root.google.cloud.dataproc.v1.PySparkJob(); + if (object.mainPythonFileUri != null) + message.mainPythonFileUri = String(object.mainPythonFileUri); + if (object.args) { + if (!Array.isArray(object.args)) + throw TypeError(".google.cloud.dataproc.v1.PySparkJob.args: array expected"); + message.args = []; + for (var i = 0; i < object.args.length; ++i) + message.args[i] = String(object.args[i]); + } + if (object.pythonFileUris) { + if (!Array.isArray(object.pythonFileUris)) + throw TypeError(".google.cloud.dataproc.v1.PySparkJob.pythonFileUris: array expected"); + message.pythonFileUris = []; + for (var i = 0; i < object.pythonFileUris.length; ++i) + message.pythonFileUris[i] = String(object.pythonFileUris[i]); + } + if (object.jarFileUris) { + if (!Array.isArray(object.jarFileUris)) + throw TypeError(".google.cloud.dataproc.v1.PySparkJob.jarFileUris: array expected"); + message.jarFileUris = []; + for (var i = 0; i < object.jarFileUris.length; ++i) + message.jarFileUris[i] = String(object.jarFileUris[i]); + } + if (object.fileUris) { + if (!Array.isArray(object.fileUris)) + throw TypeError(".google.cloud.dataproc.v1.PySparkJob.fileUris: array expected"); + message.fileUris = []; + for (var i = 0; i < object.fileUris.length; ++i) + message.fileUris[i] = String(object.fileUris[i]); + } + if (object.archiveUris) { + if (!Array.isArray(object.archiveUris)) + throw TypeError(".google.cloud.dataproc.v1.PySparkJob.archiveUris: array expected"); + message.archiveUris = []; + for (var i = 0; i < object.archiveUris.length; ++i) + message.archiveUris[i] = 
String(object.archiveUris[i]); + } + if (object.properties) { + if (typeof object.properties !== "object") + throw TypeError(".google.cloud.dataproc.v1.PySparkJob.properties: object expected"); + message.properties = {}; + for (var keys = Object.keys(object.properties), i = 0; i < keys.length; ++i) + message.properties[keys[i]] = String(object.properties[keys[i]]); + } + if (object.loggingConfig != null) { + if (typeof object.loggingConfig !== "object") + throw TypeError(".google.cloud.dataproc.v1.PySparkJob.loggingConfig: object expected"); + message.loggingConfig = $root.google.cloud.dataproc.v1.LoggingConfig.fromObject(object.loggingConfig); + } + return message; + }; + + /** + * Creates a plain object from a PySparkJob message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.QueryList + * @memberof google.cloud.dataproc.v1.PySparkJob * @static - * @param {google.cloud.dataproc.v1.QueryList} message QueryList + * @param {google.cloud.dataproc.v1.PySparkJob} message PySparkJob * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - QueryList.toObject = function toObject(message, options) { + PySparkJob.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) - object.queries = []; - if (message.queries && message.queries.length) { - object.queries = []; - for (var j = 0; j < message.queries.length; ++j) - object.queries[j] = message.queries[j]; + if (options.arrays || options.defaults) { + object.args = []; + object.pythonFileUris = []; + object.jarFileUris = []; + object.fileUris = []; + object.archiveUris = []; } + if (options.objects || options.defaults) + object.properties = {}; + if (options.defaults) { + object.mainPythonFileUri = ""; + object.loggingConfig = null; + } + if (message.mainPythonFileUri != null && message.hasOwnProperty("mainPythonFileUri")) + 
object.mainPythonFileUri = message.mainPythonFileUri; + if (message.args && message.args.length) { + object.args = []; + for (var j = 0; j < message.args.length; ++j) + object.args[j] = message.args[j]; + } + if (message.pythonFileUris && message.pythonFileUris.length) { + object.pythonFileUris = []; + for (var j = 0; j < message.pythonFileUris.length; ++j) + object.pythonFileUris[j] = message.pythonFileUris[j]; + } + if (message.jarFileUris && message.jarFileUris.length) { + object.jarFileUris = []; + for (var j = 0; j < message.jarFileUris.length; ++j) + object.jarFileUris[j] = message.jarFileUris[j]; + } + if (message.fileUris && message.fileUris.length) { + object.fileUris = []; + for (var j = 0; j < message.fileUris.length; ++j) + object.fileUris[j] = message.fileUris[j]; + } + if (message.archiveUris && message.archiveUris.length) { + object.archiveUris = []; + for (var j = 0; j < message.archiveUris.length; ++j) + object.archiveUris[j] = message.archiveUris[j]; + } + var keys2; + if (message.properties && (keys2 = Object.keys(message.properties)).length) { + object.properties = {}; + for (var j = 0; j < keys2.length; ++j) + object.properties[keys2[j]] = message.properties[keys2[j]]; + } + if (message.loggingConfig != null && message.hasOwnProperty("loggingConfig")) + object.loggingConfig = $root.google.cloud.dataproc.v1.LoggingConfig.toObject(message.loggingConfig, options); return object; }; /** - * Converts this QueryList to JSON. + * Converts this PySparkJob to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.QueryList + * @memberof google.cloud.dataproc.v1.PySparkJob * @instance * @returns {Object.} JSON object */ - QueryList.prototype.toJSON = function toJSON() { + PySparkJob.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for QueryList + * Gets the default type url for PySparkJob * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.QueryList + * @memberof google.cloud.dataproc.v1.PySparkJob * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - QueryList.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + PySparkJob.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.QueryList"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.PySparkJob"; }; - return QueryList; + return PySparkJob; })(); - v1.HiveJob = (function() { + v1.QueryList = (function() { /** - * Properties of a HiveJob. + * Properties of a QueryList. * @memberof google.cloud.dataproc.v1 - * @interface IHiveJob - * @property {string|null} [queryFileUri] HiveJob queryFileUri - * @property {google.cloud.dataproc.v1.IQueryList|null} [queryList] HiveJob queryList - * @property {boolean|null} [continueOnFailure] HiveJob continueOnFailure - * @property {Object.|null} [scriptVariables] HiveJob scriptVariables - * @property {Object.|null} [properties] HiveJob properties - * @property {Array.|null} [jarFileUris] HiveJob jarFileUris + * @interface IQueryList + * @property {Array.|null} [queries] QueryList queries */ /** - * Constructs a new HiveJob. + * Constructs a new QueryList. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a HiveJob. - * @implements IHiveJob + * @classdesc Represents a QueryList. 
+ * @implements IQueryList * @constructor - * @param {google.cloud.dataproc.v1.IHiveJob=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IQueryList=} [properties] Properties to set */ - function HiveJob(properties) { - this.scriptVariables = {}; - this.properties = {}; - this.jarFileUris = []; + function QueryList(properties) { + this.queries = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -24432,32 +24855,258 @@ } /** - * HiveJob queryFileUri. - * @member {string|null|undefined} queryFileUri - * @memberof google.cloud.dataproc.v1.HiveJob + * QueryList queries. + * @member {Array.} queries + * @memberof google.cloud.dataproc.v1.QueryList * @instance */ - HiveJob.prototype.queryFileUri = null; + QueryList.prototype.queries = $util.emptyArray; /** - * HiveJob queryList. - * @member {google.cloud.dataproc.v1.IQueryList|null|undefined} queryList - * @memberof google.cloud.dataproc.v1.HiveJob - * @instance + * Creates a new QueryList instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1.QueryList + * @static + * @param {google.cloud.dataproc.v1.IQueryList=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.QueryList} QueryList instance */ - HiveJob.prototype.queryList = null; + QueryList.create = function create(properties) { + return new QueryList(properties); + }; /** - * HiveJob continueOnFailure. - * @member {boolean} continueOnFailure - * @memberof google.cloud.dataproc.v1.HiveJob - * @instance + * Encodes the specified QueryList message. Does not implicitly {@link google.cloud.dataproc.v1.QueryList.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.dataproc.v1.QueryList + * @static + * @param {google.cloud.dataproc.v1.IQueryList} message QueryList message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - HiveJob.prototype.continueOnFailure = false; + QueryList.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.queries != null && message.queries.length) + for (var i = 0; i < message.queries.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.queries[i]); + return writer; + }; /** - * HiveJob scriptVariables. - * @member {Object.} scriptVariables + * Encodes the specified QueryList message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.QueryList.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.QueryList + * @static + * @param {google.cloud.dataproc.v1.IQueryList} message QueryList message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + QueryList.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a QueryList message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1.QueryList + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.QueryList} QueryList + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + QueryList.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.QueryList(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.queries && message.queries.length)) + message.queries = []; + message.queries.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a QueryList message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.dataproc.v1.QueryList + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.dataproc.v1.QueryList} QueryList + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + QueryList.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a QueryList message. + * @function verify + * @memberof google.cloud.dataproc.v1.QueryList + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + QueryList.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.queries != null && message.hasOwnProperty("queries")) { + if (!Array.isArray(message.queries)) + return "queries: array expected"; + for (var i = 0; i < message.queries.length; ++i) + if (!$util.isString(message.queries[i])) + return "queries: string[] expected"; + } + return null; + }; + + /** + * Creates a QueryList message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.dataproc.v1.QueryList + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.QueryList} QueryList + */ + QueryList.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.QueryList) + return object; + var message = new $root.google.cloud.dataproc.v1.QueryList(); + if (object.queries) { + if (!Array.isArray(object.queries)) + throw TypeError(".google.cloud.dataproc.v1.QueryList.queries: array expected"); + message.queries = []; + for (var i = 0; i < object.queries.length; ++i) + message.queries[i] = String(object.queries[i]); + } + return message; + }; + + /** + * Creates a plain object from a QueryList message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.dataproc.v1.QueryList + * @static + * @param {google.cloud.dataproc.v1.QueryList} message QueryList + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + QueryList.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) + object.queries = []; + if (message.queries && message.queries.length) { + object.queries = []; + for (var j = 0; j < message.queries.length; ++j) + object.queries[j] = message.queries[j]; + } + return object; + }; + + /** + * Converts this QueryList to JSON. 
+ * @function toJSON + * @memberof google.cloud.dataproc.v1.QueryList + * @instance + * @returns {Object.} JSON object + */ + QueryList.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for QueryList + * @function getTypeUrl + * @memberof google.cloud.dataproc.v1.QueryList + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + QueryList.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.dataproc.v1.QueryList"; + }; + + return QueryList; + })(); + + v1.HiveJob = (function() { + + /** + * Properties of a HiveJob. + * @memberof google.cloud.dataproc.v1 + * @interface IHiveJob + * @property {string|null} [queryFileUri] HiveJob queryFileUri + * @property {google.cloud.dataproc.v1.IQueryList|null} [queryList] HiveJob queryList + * @property {boolean|null} [continueOnFailure] HiveJob continueOnFailure + * @property {Object.|null} [scriptVariables] HiveJob scriptVariables + * @property {Object.|null} [properties] HiveJob properties + * @property {Array.|null} [jarFileUris] HiveJob jarFileUris + */ + + /** + * Constructs a new HiveJob. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a HiveJob. + * @implements IHiveJob + * @constructor + * @param {google.cloud.dataproc.v1.IHiveJob=} [properties] Properties to set + */ + function HiveJob(properties) { + this.scriptVariables = {}; + this.properties = {}; + this.jarFileUris = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * HiveJob queryFileUri. 
+ * @member {string|null|undefined} queryFileUri + * @memberof google.cloud.dataproc.v1.HiveJob + * @instance + */ + HiveJob.prototype.queryFileUri = null; + + /** + * HiveJob queryList. + * @member {google.cloud.dataproc.v1.IQueryList|null|undefined} queryList + * @memberof google.cloud.dataproc.v1.HiveJob + * @instance + */ + HiveJob.prototype.queryList = null; + + /** + * HiveJob continueOnFailure. + * @member {boolean} continueOnFailure + * @memberof google.cloud.dataproc.v1.HiveJob + * @instance + */ + HiveJob.prototype.continueOnFailure = false; + + /** + * HiveJob scriptVariables. + * @member {Object.} scriptVariables * @memberof google.cloud.dataproc.v1.HiveJob * @instance */ @@ -27892,6 +28541,7 @@ * @property {google.cloud.dataproc.v1.IJobScheduling|null} [scheduling] Job scheduling * @property {string|null} [jobUuid] Job jobUuid * @property {boolean|null} [done] Job done + * @property {google.cloud.dataproc.v1.IDriverSchedulingConfig|null} [driverSchedulingConfig] Job driverSchedulingConfig */ /** @@ -28064,6 +28714,14 @@ */ Job.prototype.done = false; + /** + * Job driverSchedulingConfig. 
+ * @member {google.cloud.dataproc.v1.IDriverSchedulingConfig|null|undefined} driverSchedulingConfig + * @memberof google.cloud.dataproc.v1.Job + * @instance + */ + Job.prototype.driverSchedulingConfig = null; + // OneOf field names bound to virtual getters and setters var $oneOfFields; @@ -28143,6 +28801,8 @@ $root.google.cloud.dataproc.v1.PrestoJob.encode(message.prestoJob, writer.uint32(/* id 23, wireType 2 =*/186).fork()).ldelim(); if (message.done != null && Object.hasOwnProperty.call(message, "done")) writer.uint32(/* id 24, wireType 0 =*/192).bool(message.done); + if (message.driverSchedulingConfig != null && Object.hasOwnProperty.call(message, "driverSchedulingConfig")) + $root.google.cloud.dataproc.v1.DriverSchedulingConfig.encode(message.driverSchedulingConfig, writer.uint32(/* id 27, wireType 2 =*/218).fork()).ldelim(); return writer; }; @@ -28276,6 +28936,10 @@ message.done = reader.bool(); break; } + case 27: { + message.driverSchedulingConfig = $root.google.cloud.dataproc.v1.DriverSchedulingConfig.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -28442,277 +29106,1560 @@ if (error) return "scheduling." 
+ error; } - if (message.jobUuid != null && message.hasOwnProperty("jobUuid")) - if (!$util.isString(message.jobUuid)) - return "jobUuid: string expected"; - if (message.done != null && message.hasOwnProperty("done")) - if (typeof message.done !== "boolean") - return "done: boolean expected"; - return null; + if (message.jobUuid != null && message.hasOwnProperty("jobUuid")) + if (!$util.isString(message.jobUuid)) + return "jobUuid: string expected"; + if (message.done != null && message.hasOwnProperty("done")) + if (typeof message.done !== "boolean") + return "done: boolean expected"; + if (message.driverSchedulingConfig != null && message.hasOwnProperty("driverSchedulingConfig")) { + var error = $root.google.cloud.dataproc.v1.DriverSchedulingConfig.verify(message.driverSchedulingConfig); + if (error) + return "driverSchedulingConfig." + error; + } + return null; + }; + + /** + * Creates a Job message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.dataproc.v1.Job + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.Job} Job + */ + Job.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.Job) + return object; + var message = new $root.google.cloud.dataproc.v1.Job(); + if (object.reference != null) { + if (typeof object.reference !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.reference: object expected"); + message.reference = $root.google.cloud.dataproc.v1.JobReference.fromObject(object.reference); + } + if (object.placement != null) { + if (typeof object.placement !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.placement: object expected"); + message.placement = $root.google.cloud.dataproc.v1.JobPlacement.fromObject(object.placement); + } + if (object.hadoopJob != null) { + if (typeof object.hadoopJob !== "object") + throw 
TypeError(".google.cloud.dataproc.v1.Job.hadoopJob: object expected"); + message.hadoopJob = $root.google.cloud.dataproc.v1.HadoopJob.fromObject(object.hadoopJob); + } + if (object.sparkJob != null) { + if (typeof object.sparkJob !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.sparkJob: object expected"); + message.sparkJob = $root.google.cloud.dataproc.v1.SparkJob.fromObject(object.sparkJob); + } + if (object.pysparkJob != null) { + if (typeof object.pysparkJob !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.pysparkJob: object expected"); + message.pysparkJob = $root.google.cloud.dataproc.v1.PySparkJob.fromObject(object.pysparkJob); + } + if (object.hiveJob != null) { + if (typeof object.hiveJob !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.hiveJob: object expected"); + message.hiveJob = $root.google.cloud.dataproc.v1.HiveJob.fromObject(object.hiveJob); + } + if (object.pigJob != null) { + if (typeof object.pigJob !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.pigJob: object expected"); + message.pigJob = $root.google.cloud.dataproc.v1.PigJob.fromObject(object.pigJob); + } + if (object.sparkRJob != null) { + if (typeof object.sparkRJob !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.sparkRJob: object expected"); + message.sparkRJob = $root.google.cloud.dataproc.v1.SparkRJob.fromObject(object.sparkRJob); + } + if (object.sparkSqlJob != null) { + if (typeof object.sparkSqlJob !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.sparkSqlJob: object expected"); + message.sparkSqlJob = $root.google.cloud.dataproc.v1.SparkSqlJob.fromObject(object.sparkSqlJob); + } + if (object.prestoJob != null) { + if (typeof object.prestoJob !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.prestoJob: object expected"); + message.prestoJob = $root.google.cloud.dataproc.v1.PrestoJob.fromObject(object.prestoJob); + } + if (object.status != null) { + if (typeof object.status !== "object") 
+ throw TypeError(".google.cloud.dataproc.v1.Job.status: object expected"); + message.status = $root.google.cloud.dataproc.v1.JobStatus.fromObject(object.status); + } + if (object.statusHistory) { + if (!Array.isArray(object.statusHistory)) + throw TypeError(".google.cloud.dataproc.v1.Job.statusHistory: array expected"); + message.statusHistory = []; + for (var i = 0; i < object.statusHistory.length; ++i) { + if (typeof object.statusHistory[i] !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.statusHistory: object expected"); + message.statusHistory[i] = $root.google.cloud.dataproc.v1.JobStatus.fromObject(object.statusHistory[i]); + } + } + if (object.yarnApplications) { + if (!Array.isArray(object.yarnApplications)) + throw TypeError(".google.cloud.dataproc.v1.Job.yarnApplications: array expected"); + message.yarnApplications = []; + for (var i = 0; i < object.yarnApplications.length; ++i) { + if (typeof object.yarnApplications[i] !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.yarnApplications: object expected"); + message.yarnApplications[i] = $root.google.cloud.dataproc.v1.YarnApplication.fromObject(object.yarnApplications[i]); + } + } + if (object.driverOutputResourceUri != null) + message.driverOutputResourceUri = String(object.driverOutputResourceUri); + if (object.driverControlFilesUri != null) + message.driverControlFilesUri = String(object.driverControlFilesUri); + if (object.labels) { + if (typeof object.labels !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.labels: object expected"); + message.labels = {}; + for (var keys = Object.keys(object.labels), i = 0; i < keys.length; ++i) + message.labels[keys[i]] = String(object.labels[keys[i]]); + } + if (object.scheduling != null) { + if (typeof object.scheduling !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.scheduling: object expected"); + message.scheduling = $root.google.cloud.dataproc.v1.JobScheduling.fromObject(object.scheduling); + } + if 
(object.jobUuid != null) + message.jobUuid = String(object.jobUuid); + if (object.done != null) + message.done = Boolean(object.done); + if (object.driverSchedulingConfig != null) { + if (typeof object.driverSchedulingConfig !== "object") + throw TypeError(".google.cloud.dataproc.v1.Job.driverSchedulingConfig: object expected"); + message.driverSchedulingConfig = $root.google.cloud.dataproc.v1.DriverSchedulingConfig.fromObject(object.driverSchedulingConfig); + } + return message; + }; + + /** + * Creates a plain object from a Job message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.dataproc.v1.Job + * @static + * @param {google.cloud.dataproc.v1.Job} message Job + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Job.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) { + object.yarnApplications = []; + object.statusHistory = []; + } + if (options.objects || options.defaults) + object.labels = {}; + if (options.defaults) { + object.reference = null; + object.placement = null; + object.status = null; + object.driverControlFilesUri = ""; + object.driverOutputResourceUri = ""; + object.scheduling = null; + object.jobUuid = ""; + object.done = false; + object.driverSchedulingConfig = null; + } + if (message.reference != null && message.hasOwnProperty("reference")) + object.reference = $root.google.cloud.dataproc.v1.JobReference.toObject(message.reference, options); + if (message.placement != null && message.hasOwnProperty("placement")) + object.placement = $root.google.cloud.dataproc.v1.JobPlacement.toObject(message.placement, options); + if (message.hadoopJob != null && message.hasOwnProperty("hadoopJob")) { + object.hadoopJob = $root.google.cloud.dataproc.v1.HadoopJob.toObject(message.hadoopJob, options); + if (options.oneofs) + object.typeJob = "hadoopJob"; + } 
+ if (message.sparkJob != null && message.hasOwnProperty("sparkJob")) { + object.sparkJob = $root.google.cloud.dataproc.v1.SparkJob.toObject(message.sparkJob, options); + if (options.oneofs) + object.typeJob = "sparkJob"; + } + if (message.pysparkJob != null && message.hasOwnProperty("pysparkJob")) { + object.pysparkJob = $root.google.cloud.dataproc.v1.PySparkJob.toObject(message.pysparkJob, options); + if (options.oneofs) + object.typeJob = "pysparkJob"; + } + if (message.hiveJob != null && message.hasOwnProperty("hiveJob")) { + object.hiveJob = $root.google.cloud.dataproc.v1.HiveJob.toObject(message.hiveJob, options); + if (options.oneofs) + object.typeJob = "hiveJob"; + } + if (message.pigJob != null && message.hasOwnProperty("pigJob")) { + object.pigJob = $root.google.cloud.dataproc.v1.PigJob.toObject(message.pigJob, options); + if (options.oneofs) + object.typeJob = "pigJob"; + } + if (message.status != null && message.hasOwnProperty("status")) + object.status = $root.google.cloud.dataproc.v1.JobStatus.toObject(message.status, options); + if (message.yarnApplications && message.yarnApplications.length) { + object.yarnApplications = []; + for (var j = 0; j < message.yarnApplications.length; ++j) + object.yarnApplications[j] = $root.google.cloud.dataproc.v1.YarnApplication.toObject(message.yarnApplications[j], options); + } + if (message.sparkSqlJob != null && message.hasOwnProperty("sparkSqlJob")) { + object.sparkSqlJob = $root.google.cloud.dataproc.v1.SparkSqlJob.toObject(message.sparkSqlJob, options); + if (options.oneofs) + object.typeJob = "sparkSqlJob"; + } + if (message.statusHistory && message.statusHistory.length) { + object.statusHistory = []; + for (var j = 0; j < message.statusHistory.length; ++j) + object.statusHistory[j] = $root.google.cloud.dataproc.v1.JobStatus.toObject(message.statusHistory[j], options); + } + if (message.driverControlFilesUri != null && message.hasOwnProperty("driverControlFilesUri")) + object.driverControlFilesUri = 
message.driverControlFilesUri; + if (message.driverOutputResourceUri != null && message.hasOwnProperty("driverOutputResourceUri")) + object.driverOutputResourceUri = message.driverOutputResourceUri; + var keys2; + if (message.labels && (keys2 = Object.keys(message.labels)).length) { + object.labels = {}; + for (var j = 0; j < keys2.length; ++j) + object.labels[keys2[j]] = message.labels[keys2[j]]; + } + if (message.scheduling != null && message.hasOwnProperty("scheduling")) + object.scheduling = $root.google.cloud.dataproc.v1.JobScheduling.toObject(message.scheduling, options); + if (message.sparkRJob != null && message.hasOwnProperty("sparkRJob")) { + object.sparkRJob = $root.google.cloud.dataproc.v1.SparkRJob.toObject(message.sparkRJob, options); + if (options.oneofs) + object.typeJob = "sparkRJob"; + } + if (message.jobUuid != null && message.hasOwnProperty("jobUuid")) + object.jobUuid = message.jobUuid; + if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) { + object.prestoJob = $root.google.cloud.dataproc.v1.PrestoJob.toObject(message.prestoJob, options); + if (options.oneofs) + object.typeJob = "prestoJob"; + } + if (message.done != null && message.hasOwnProperty("done")) + object.done = message.done; + if (message.driverSchedulingConfig != null && message.hasOwnProperty("driverSchedulingConfig")) + object.driverSchedulingConfig = $root.google.cloud.dataproc.v1.DriverSchedulingConfig.toObject(message.driverSchedulingConfig, options); + return object; + }; + + /** + * Converts this Job to JSON. 
+ * @function toJSON + * @memberof google.cloud.dataproc.v1.Job + * @instance + * @returns {Object.} JSON object + */ + Job.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Job + * @function getTypeUrl + * @memberof google.cloud.dataproc.v1.Job + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Job.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.dataproc.v1.Job"; + }; + + return Job; + })(); + + v1.DriverSchedulingConfig = (function() { + + /** + * Properties of a DriverSchedulingConfig. + * @memberof google.cloud.dataproc.v1 + * @interface IDriverSchedulingConfig + * @property {number|null} [memoryMb] DriverSchedulingConfig memoryMb + * @property {number|null} [vcores] DriverSchedulingConfig vcores + */ + + /** + * Constructs a new DriverSchedulingConfig. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a DriverSchedulingConfig. + * @implements IDriverSchedulingConfig + * @constructor + * @param {google.cloud.dataproc.v1.IDriverSchedulingConfig=} [properties] Properties to set + */ + function DriverSchedulingConfig(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * DriverSchedulingConfig memoryMb. + * @member {number} memoryMb + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @instance + */ + DriverSchedulingConfig.prototype.memoryMb = 0; + + /** + * DriverSchedulingConfig vcores. 
+ * @member {number} vcores + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @instance + */ + DriverSchedulingConfig.prototype.vcores = 0; + + /** + * Creates a new DriverSchedulingConfig instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @static + * @param {google.cloud.dataproc.v1.IDriverSchedulingConfig=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.DriverSchedulingConfig} DriverSchedulingConfig instance + */ + DriverSchedulingConfig.create = function create(properties) { + return new DriverSchedulingConfig(properties); + }; + + /** + * Encodes the specified DriverSchedulingConfig message. Does not implicitly {@link google.cloud.dataproc.v1.DriverSchedulingConfig.verify|verify} messages. + * @function encode + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @static + * @param {google.cloud.dataproc.v1.IDriverSchedulingConfig} message DriverSchedulingConfig message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DriverSchedulingConfig.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.memoryMb != null && Object.hasOwnProperty.call(message, "memoryMb")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.memoryMb); + if (message.vcores != null && Object.hasOwnProperty.call(message, "vcores")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.vcores); + return writer; + }; + + /** + * Encodes the specified DriverSchedulingConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DriverSchedulingConfig.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @static + * @param {google.cloud.dataproc.v1.IDriverSchedulingConfig} message DriverSchedulingConfig message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DriverSchedulingConfig.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DriverSchedulingConfig message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.DriverSchedulingConfig} DriverSchedulingConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DriverSchedulingConfig.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DriverSchedulingConfig(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.memoryMb = reader.int32(); + break; + } + case 2: { + message.vcores = reader.int32(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DriverSchedulingConfig message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.dataproc.v1.DriverSchedulingConfig} DriverSchedulingConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DriverSchedulingConfig.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DriverSchedulingConfig message. + * @function verify + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DriverSchedulingConfig.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.memoryMb != null && message.hasOwnProperty("memoryMb")) + if (!$util.isInteger(message.memoryMb)) + return "memoryMb: integer expected"; + if (message.vcores != null && message.hasOwnProperty("vcores")) + if (!$util.isInteger(message.vcores)) + return "vcores: integer expected"; + return null; + }; + + /** + * Creates a DriverSchedulingConfig message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.DriverSchedulingConfig} DriverSchedulingConfig + */ + DriverSchedulingConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.DriverSchedulingConfig) + return object; + var message = new $root.google.cloud.dataproc.v1.DriverSchedulingConfig(); + if (object.memoryMb != null) + message.memoryMb = object.memoryMb | 0; + if (object.vcores != null) + message.vcores = object.vcores | 0; + return message; + }; + + /** + * Creates a plain object from a DriverSchedulingConfig message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @static + * @param {google.cloud.dataproc.v1.DriverSchedulingConfig} message DriverSchedulingConfig + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DriverSchedulingConfig.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.memoryMb = 0; + object.vcores = 0; + } + if (message.memoryMb != null && message.hasOwnProperty("memoryMb")) + object.memoryMb = message.memoryMb; + if (message.vcores != null && message.hasOwnProperty("vcores")) + object.vcores = message.vcores; + return object; + }; + + /** + * Converts this DriverSchedulingConfig to JSON. 
+ * @function toJSON + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @instance + * @returns {Object.} JSON object + */ + DriverSchedulingConfig.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DriverSchedulingConfig + * @function getTypeUrl + * @memberof google.cloud.dataproc.v1.DriverSchedulingConfig + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DriverSchedulingConfig.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.dataproc.v1.DriverSchedulingConfig"; + }; + + return DriverSchedulingConfig; + })(); + + v1.JobScheduling = (function() { + + /** + * Properties of a JobScheduling. + * @memberof google.cloud.dataproc.v1 + * @interface IJobScheduling + * @property {number|null} [maxFailuresPerHour] JobScheduling maxFailuresPerHour + * @property {number|null} [maxFailuresTotal] JobScheduling maxFailuresTotal + */ + + /** + * Constructs a new JobScheduling. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a JobScheduling. + * @implements IJobScheduling + * @constructor + * @param {google.cloud.dataproc.v1.IJobScheduling=} [properties] Properties to set + */ + function JobScheduling(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * JobScheduling maxFailuresPerHour. + * @member {number} maxFailuresPerHour + * @memberof google.cloud.dataproc.v1.JobScheduling + * @instance + */ + JobScheduling.prototype.maxFailuresPerHour = 0; + + /** + * JobScheduling maxFailuresTotal. 
+ * @member {number} maxFailuresTotal + * @memberof google.cloud.dataproc.v1.JobScheduling + * @instance + */ + JobScheduling.prototype.maxFailuresTotal = 0; + + /** + * Creates a new JobScheduling instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1.JobScheduling + * @static + * @param {google.cloud.dataproc.v1.IJobScheduling=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.JobScheduling} JobScheduling instance + */ + JobScheduling.create = function create(properties) { + return new JobScheduling(properties); + }; + + /** + * Encodes the specified JobScheduling message. Does not implicitly {@link google.cloud.dataproc.v1.JobScheduling.verify|verify} messages. + * @function encode + * @memberof google.cloud.dataproc.v1.JobScheduling + * @static + * @param {google.cloud.dataproc.v1.IJobScheduling} message JobScheduling message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + JobScheduling.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.maxFailuresPerHour != null && Object.hasOwnProperty.call(message, "maxFailuresPerHour")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.maxFailuresPerHour); + if (message.maxFailuresTotal != null && Object.hasOwnProperty.call(message, "maxFailuresTotal")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.maxFailuresTotal); + return writer; + }; + + /** + * Encodes the specified JobScheduling message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.JobScheduling.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.JobScheduling + * @static + * @param {google.cloud.dataproc.v1.IJobScheduling} message JobScheduling message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + JobScheduling.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a JobScheduling message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1.JobScheduling + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.JobScheduling} JobScheduling + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + JobScheduling.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.JobScheduling(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.maxFailuresPerHour = reader.int32(); + break; + } + case 2: { + message.maxFailuresTotal = reader.int32(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a JobScheduling message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof google.cloud.dataproc.v1.JobScheduling + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.dataproc.v1.JobScheduling} JobScheduling + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + JobScheduling.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a JobScheduling message. + * @function verify + * @memberof google.cloud.dataproc.v1.JobScheduling + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + JobScheduling.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.maxFailuresPerHour != null && message.hasOwnProperty("maxFailuresPerHour")) + if (!$util.isInteger(message.maxFailuresPerHour)) + return "maxFailuresPerHour: integer expected"; + if (message.maxFailuresTotal != null && message.hasOwnProperty("maxFailuresTotal")) + if (!$util.isInteger(message.maxFailuresTotal)) + return "maxFailuresTotal: integer expected"; + return null; + }; + + /** + * Creates a JobScheduling message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.dataproc.v1.JobScheduling + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.JobScheduling} JobScheduling + */ + JobScheduling.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.JobScheduling) + return object; + var message = new $root.google.cloud.dataproc.v1.JobScheduling(); + if (object.maxFailuresPerHour != null) + message.maxFailuresPerHour = object.maxFailuresPerHour | 0; + if (object.maxFailuresTotal != null) + message.maxFailuresTotal = object.maxFailuresTotal | 0; + return message; + }; + + /** + * Creates a plain object from a JobScheduling message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.dataproc.v1.JobScheduling + * @static + * @param {google.cloud.dataproc.v1.JobScheduling} message JobScheduling + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + JobScheduling.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.maxFailuresPerHour = 0; + object.maxFailuresTotal = 0; + } + if (message.maxFailuresPerHour != null && message.hasOwnProperty("maxFailuresPerHour")) + object.maxFailuresPerHour = message.maxFailuresPerHour; + if (message.maxFailuresTotal != null && message.hasOwnProperty("maxFailuresTotal")) + object.maxFailuresTotal = message.maxFailuresTotal; + return object; + }; + + /** + * Converts this JobScheduling to JSON. 
+ * @function toJSON + * @memberof google.cloud.dataproc.v1.JobScheduling + * @instance + * @returns {Object.} JSON object + */ + JobScheduling.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for JobScheduling + * @function getTypeUrl + * @memberof google.cloud.dataproc.v1.JobScheduling + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + JobScheduling.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.dataproc.v1.JobScheduling"; + }; + + return JobScheduling; + })(); + + v1.SubmitJobRequest = (function() { + + /** + * Properties of a SubmitJobRequest. + * @memberof google.cloud.dataproc.v1 + * @interface ISubmitJobRequest + * @property {string|null} [projectId] SubmitJobRequest projectId + * @property {string|null} [region] SubmitJobRequest region + * @property {google.cloud.dataproc.v1.IJob|null} [job] SubmitJobRequest job + * @property {string|null} [requestId] SubmitJobRequest requestId + */ + + /** + * Constructs a new SubmitJobRequest. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a SubmitJobRequest. + * @implements ISubmitJobRequest + * @constructor + * @param {google.cloud.dataproc.v1.ISubmitJobRequest=} [properties] Properties to set + */ + function SubmitJobRequest(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * SubmitJobRequest projectId. + * @member {string} projectId + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @instance + */ + SubmitJobRequest.prototype.projectId = ""; + + /** + * SubmitJobRequest region. 
+ * @member {string} region + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @instance + */ + SubmitJobRequest.prototype.region = ""; + + /** + * SubmitJobRequest job. + * @member {google.cloud.dataproc.v1.IJob|null|undefined} job + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @instance + */ + SubmitJobRequest.prototype.job = null; + + /** + * SubmitJobRequest requestId. + * @member {string} requestId + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @instance + */ + SubmitJobRequest.prototype.requestId = ""; + + /** + * Creates a new SubmitJobRequest instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @static + * @param {google.cloud.dataproc.v1.ISubmitJobRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.SubmitJobRequest} SubmitJobRequest instance + */ + SubmitJobRequest.create = function create(properties) { + return new SubmitJobRequest(properties); + }; + + /** + * Encodes the specified SubmitJobRequest message. Does not implicitly {@link google.cloud.dataproc.v1.SubmitJobRequest.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @static + * @param {google.cloud.dataproc.v1.ISubmitJobRequest} message SubmitJobRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SubmitJobRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); + if (message.job != null && Object.hasOwnProperty.call(message, "job")) + $root.google.cloud.dataproc.v1.Job.encode(message.job, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.region != null && Object.hasOwnProperty.call(message, "region")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); + if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.requestId); + return writer; + }; + + /** + * Encodes the specified SubmitJobRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.SubmitJobRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @static + * @param {google.cloud.dataproc.v1.ISubmitJobRequest} message SubmitJobRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SubmitJobRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a SubmitJobRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.SubmitJobRequest} SubmitJobRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SubmitJobRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.SubmitJobRequest(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.projectId = reader.string(); + break; + } + case 3: { + message.region = reader.string(); + break; + } + case 2: { + message.job = $root.google.cloud.dataproc.v1.Job.decode(reader, reader.uint32()); + break; + } + case 4: { + message.requestId = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a SubmitJobRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.dataproc.v1.SubmitJobRequest} SubmitJobRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SubmitJobRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a SubmitJobRequest message. 
+ * @function verify + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + SubmitJobRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.projectId != null && message.hasOwnProperty("projectId")) + if (!$util.isString(message.projectId)) + return "projectId: string expected"; + if (message.region != null && message.hasOwnProperty("region")) + if (!$util.isString(message.region)) + return "region: string expected"; + if (message.job != null && message.hasOwnProperty("job")) { + var error = $root.google.cloud.dataproc.v1.Job.verify(message.job); + if (error) + return "job." + error; + } + if (message.requestId != null && message.hasOwnProperty("requestId")) + if (!$util.isString(message.requestId)) + return "requestId: string expected"; + return null; + }; + + /** + * Creates a SubmitJobRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.SubmitJobRequest} SubmitJobRequest + */ + SubmitJobRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.SubmitJobRequest) + return object; + var message = new $root.google.cloud.dataproc.v1.SubmitJobRequest(); + if (object.projectId != null) + message.projectId = String(object.projectId); + if (object.region != null) + message.region = String(object.region); + if (object.job != null) { + if (typeof object.job !== "object") + throw TypeError(".google.cloud.dataproc.v1.SubmitJobRequest.job: object expected"); + message.job = $root.google.cloud.dataproc.v1.Job.fromObject(object.job); + } + if (object.requestId != null) + message.requestId = String(object.requestId); + return message; + }; + + /** + * Creates a plain object from a SubmitJobRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @static + * @param {google.cloud.dataproc.v1.SubmitJobRequest} message SubmitJobRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + SubmitJobRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.projectId = ""; + object.job = null; + object.region = ""; + object.requestId = ""; + } + if (message.projectId != null && message.hasOwnProperty("projectId")) + object.projectId = message.projectId; + if (message.job != null && message.hasOwnProperty("job")) + object.job = $root.google.cloud.dataproc.v1.Job.toObject(message.job, options); + if (message.region != null && message.hasOwnProperty("region")) + object.region = message.region; + if (message.requestId != null && message.hasOwnProperty("requestId")) + object.requestId = message.requestId; + return object; + }; + + /** + * Converts this SubmitJobRequest to JSON. + * @function toJSON + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @instance + * @returns {Object.} JSON object + */ + SubmitJobRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for SubmitJobRequest + * @function getTypeUrl + * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + SubmitJobRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.dataproc.v1.SubmitJobRequest"; + }; + + return SubmitJobRequest; + })(); + + v1.JobMetadata = (function() { + + /** + * Properties of a JobMetadata. 
+ * @memberof google.cloud.dataproc.v1 + * @interface IJobMetadata + * @property {string|null} [jobId] JobMetadata jobId + * @property {google.cloud.dataproc.v1.IJobStatus|null} [status] JobMetadata status + * @property {string|null} [operationType] JobMetadata operationType + * @property {google.protobuf.ITimestamp|null} [startTime] JobMetadata startTime + */ + + /** + * Constructs a new JobMetadata. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a JobMetadata. + * @implements IJobMetadata + * @constructor + * @param {google.cloud.dataproc.v1.IJobMetadata=} [properties] Properties to set + */ + function JobMetadata(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * JobMetadata jobId. + * @member {string} jobId + * @memberof google.cloud.dataproc.v1.JobMetadata + * @instance + */ + JobMetadata.prototype.jobId = ""; + + /** + * JobMetadata status. + * @member {google.cloud.dataproc.v1.IJobStatus|null|undefined} status + * @memberof google.cloud.dataproc.v1.JobMetadata + * @instance + */ + JobMetadata.prototype.status = null; + + /** + * JobMetadata operationType. + * @member {string} operationType + * @memberof google.cloud.dataproc.v1.JobMetadata + * @instance + */ + JobMetadata.prototype.operationType = ""; + + /** + * JobMetadata startTime. + * @member {google.protobuf.ITimestamp|null|undefined} startTime + * @memberof google.cloud.dataproc.v1.JobMetadata + * @instance + */ + JobMetadata.prototype.startTime = null; + + /** + * Creates a new JobMetadata instance using the specified properties. 
+ * @function create + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {google.cloud.dataproc.v1.IJobMetadata=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata instance + */ + JobMetadata.create = function create(properties) { + return new JobMetadata(properties); + }; + + /** + * Encodes the specified JobMetadata message. Does not implicitly {@link google.cloud.dataproc.v1.JobMetadata.verify|verify} messages. + * @function encode + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {google.cloud.dataproc.v1.IJobMetadata} message JobMetadata message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + JobMetadata.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.jobId != null && Object.hasOwnProperty.call(message, "jobId")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.jobId); + if (message.status != null && Object.hasOwnProperty.call(message, "status")) + $root.google.cloud.dataproc.v1.JobStatus.encode(message.status, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.operationType != null && Object.hasOwnProperty.call(message, "operationType")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.operationType); + if (message.startTime != null && Object.hasOwnProperty.call(message, "startTime")) + $root.google.protobuf.Timestamp.encode(message.startTime, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified JobMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.JobMetadata.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {google.cloud.dataproc.v1.IJobMetadata} message JobMetadata message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + JobMetadata.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a JobMetadata message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + JobMetadata.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.JobMetadata(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.jobId = reader.string(); + break; + } + case 2: { + message.status = $root.google.cloud.dataproc.v1.JobStatus.decode(reader, reader.uint32()); + break; + } + case 3: { + message.operationType = reader.string(); + break; + } + case 4: { + message.startTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a JobMetadata message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + JobMetadata.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a JobMetadata message. + * @function verify + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + JobMetadata.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.jobId != null && message.hasOwnProperty("jobId")) + if (!$util.isString(message.jobId)) + return "jobId: string expected"; + if (message.status != null && message.hasOwnProperty("status")) { + var error = $root.google.cloud.dataproc.v1.JobStatus.verify(message.status); + if (error) + return "status." + error; + } + if (message.operationType != null && message.hasOwnProperty("operationType")) + if (!$util.isString(message.operationType)) + return "operationType: string expected"; + if (message.startTime != null && message.hasOwnProperty("startTime")) { + var error = $root.google.protobuf.Timestamp.verify(message.startTime); + if (error) + return "startTime." + error; + } + return null; + }; + + /** + * Creates a JobMetadata message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata + */ + JobMetadata.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.JobMetadata) + return object; + var message = new $root.google.cloud.dataproc.v1.JobMetadata(); + if (object.jobId != null) + message.jobId = String(object.jobId); + if (object.status != null) { + if (typeof object.status !== "object") + throw TypeError(".google.cloud.dataproc.v1.JobMetadata.status: object expected"); + message.status = $root.google.cloud.dataproc.v1.JobStatus.fromObject(object.status); + } + if (object.operationType != null) + message.operationType = String(object.operationType); + if (object.startTime != null) { + if (typeof object.startTime !== "object") + throw TypeError(".google.cloud.dataproc.v1.JobMetadata.startTime: object expected"); + message.startTime = $root.google.protobuf.Timestamp.fromObject(object.startTime); + } + return message; + }; + + /** + * Creates a plain object from a JobMetadata message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {google.cloud.dataproc.v1.JobMetadata} message JobMetadata + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + JobMetadata.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.jobId = ""; + object.status = null; + object.operationType = ""; + object.startTime = null; + } + if (message.jobId != null && message.hasOwnProperty("jobId")) + object.jobId = message.jobId; + if (message.status != null && message.hasOwnProperty("status")) + object.status = $root.google.cloud.dataproc.v1.JobStatus.toObject(message.status, options); + if (message.operationType != null && message.hasOwnProperty("operationType")) + object.operationType = message.operationType; + if (message.startTime != null && message.hasOwnProperty("startTime")) + object.startTime = $root.google.protobuf.Timestamp.toObject(message.startTime, options); + return object; + }; + + /** + * Converts this JobMetadata to JSON. + * @function toJSON + * @memberof google.cloud.dataproc.v1.JobMetadata + * @instance + * @returns {Object.} JSON object + */ + JobMetadata.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for JobMetadata + * @function getTypeUrl + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + JobMetadata.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.dataproc.v1.JobMetadata"; + }; + + return JobMetadata; + })(); + + v1.GetJobRequest = (function() { + + /** + * Properties of a GetJobRequest. 
+ * @memberof google.cloud.dataproc.v1 + * @interface IGetJobRequest + * @property {string|null} [projectId] GetJobRequest projectId + * @property {string|null} [region] GetJobRequest region + * @property {string|null} [jobId] GetJobRequest jobId + */ + + /** + * Constructs a new GetJobRequest. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a GetJobRequest. + * @implements IGetJobRequest + * @constructor + * @param {google.cloud.dataproc.v1.IGetJobRequest=} [properties] Properties to set + */ + function GetJobRequest(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * GetJobRequest projectId. + * @member {string} projectId + * @memberof google.cloud.dataproc.v1.GetJobRequest + * @instance + */ + GetJobRequest.prototype.projectId = ""; + + /** + * GetJobRequest region. + * @member {string} region + * @memberof google.cloud.dataproc.v1.GetJobRequest + * @instance + */ + GetJobRequest.prototype.region = ""; + + /** + * GetJobRequest jobId. + * @member {string} jobId + * @memberof google.cloud.dataproc.v1.GetJobRequest + * @instance + */ + GetJobRequest.prototype.jobId = ""; + + /** + * Creates a new GetJobRequest instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1.GetJobRequest + * @static + * @param {google.cloud.dataproc.v1.IGetJobRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.GetJobRequest} GetJobRequest instance + */ + GetJobRequest.create = function create(properties) { + return new GetJobRequest(properties); + }; + + /** + * Encodes the specified GetJobRequest message. Does not implicitly {@link google.cloud.dataproc.v1.GetJobRequest.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.dataproc.v1.GetJobRequest + * @static + * @param {google.cloud.dataproc.v1.IGetJobRequest} message GetJobRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetJobRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); + if (message.jobId != null && Object.hasOwnProperty.call(message, "jobId")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.jobId); + if (message.region != null && Object.hasOwnProperty.call(message, "region")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); + return writer; + }; + + /** + * Encodes the specified GetJobRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.GetJobRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.GetJobRequest + * @static + * @param {google.cloud.dataproc.v1.IGetJobRequest} message GetJobRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetJobRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a GetJobRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof google.cloud.dataproc.v1.GetJobRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.GetJobRequest} GetJobRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetJobRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.GetJobRequest(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.projectId = reader.string(); + break; + } + case 3: { + message.region = reader.string(); + break; + } + case 2: { + message.jobId = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; }; /** - * Creates a Job message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof google.cloud.dataproc.v1.Job + * Decodes a GetJobRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof google.cloud.dataproc.v1.GetJobRequest * @static - * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.Job} Job + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.dataproc.v1.GetJobRequest} GetJobRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Job.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.Job) - return object; - var message = new $root.google.cloud.dataproc.v1.Job(); - if (object.reference != null) { - if (typeof object.reference !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.reference: object expected"); - message.reference = $root.google.cloud.dataproc.v1.JobReference.fromObject(object.reference); - } - if (object.placement != null) { - if (typeof object.placement !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.placement: object expected"); - message.placement = $root.google.cloud.dataproc.v1.JobPlacement.fromObject(object.placement); - } - if (object.hadoopJob != null) { - if (typeof object.hadoopJob !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.hadoopJob: object expected"); - message.hadoopJob = $root.google.cloud.dataproc.v1.HadoopJob.fromObject(object.hadoopJob); - } - if (object.sparkJob != null) { - if (typeof object.sparkJob !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.sparkJob: object expected"); - message.sparkJob = $root.google.cloud.dataproc.v1.SparkJob.fromObject(object.sparkJob); - } - if (object.pysparkJob != null) { - if (typeof object.pysparkJob !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.pysparkJob: object expected"); - message.pysparkJob = $root.google.cloud.dataproc.v1.PySparkJob.fromObject(object.pysparkJob); - } - if (object.hiveJob != null) { - if (typeof object.hiveJob !== 
"object") - throw TypeError(".google.cloud.dataproc.v1.Job.hiveJob: object expected"); - message.hiveJob = $root.google.cloud.dataproc.v1.HiveJob.fromObject(object.hiveJob); - } - if (object.pigJob != null) { - if (typeof object.pigJob !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.pigJob: object expected"); - message.pigJob = $root.google.cloud.dataproc.v1.PigJob.fromObject(object.pigJob); - } - if (object.sparkRJob != null) { - if (typeof object.sparkRJob !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.sparkRJob: object expected"); - message.sparkRJob = $root.google.cloud.dataproc.v1.SparkRJob.fromObject(object.sparkRJob); - } - if (object.sparkSqlJob != null) { - if (typeof object.sparkSqlJob !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.sparkSqlJob: object expected"); - message.sparkSqlJob = $root.google.cloud.dataproc.v1.SparkSqlJob.fromObject(object.sparkSqlJob); - } - if (object.prestoJob != null) { - if (typeof object.prestoJob !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.prestoJob: object expected"); - message.prestoJob = $root.google.cloud.dataproc.v1.PrestoJob.fromObject(object.prestoJob); - } - if (object.status != null) { - if (typeof object.status !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.status: object expected"); - message.status = $root.google.cloud.dataproc.v1.JobStatus.fromObject(object.status); - } - if (object.statusHistory) { - if (!Array.isArray(object.statusHistory)) - throw TypeError(".google.cloud.dataproc.v1.Job.statusHistory: array expected"); - message.statusHistory = []; - for (var i = 0; i < object.statusHistory.length; ++i) { - if (typeof object.statusHistory[i] !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.statusHistory: object expected"); - message.statusHistory[i] = $root.google.cloud.dataproc.v1.JobStatus.fromObject(object.statusHistory[i]); - } - } - if (object.yarnApplications) { - if (!Array.isArray(object.yarnApplications)) 
- throw TypeError(".google.cloud.dataproc.v1.Job.yarnApplications: array expected"); - message.yarnApplications = []; - for (var i = 0; i < object.yarnApplications.length; ++i) { - if (typeof object.yarnApplications[i] !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.yarnApplications: object expected"); - message.yarnApplications[i] = $root.google.cloud.dataproc.v1.YarnApplication.fromObject(object.yarnApplications[i]); - } - } - if (object.driverOutputResourceUri != null) - message.driverOutputResourceUri = String(object.driverOutputResourceUri); - if (object.driverControlFilesUri != null) - message.driverControlFilesUri = String(object.driverControlFilesUri); - if (object.labels) { - if (typeof object.labels !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.labels: object expected"); - message.labels = {}; - for (var keys = Object.keys(object.labels), i = 0; i < keys.length; ++i) - message.labels[keys[i]] = String(object.labels[keys[i]]); - } - if (object.scheduling != null) { - if (typeof object.scheduling !== "object") - throw TypeError(".google.cloud.dataproc.v1.Job.scheduling: object expected"); - message.scheduling = $root.google.cloud.dataproc.v1.JobScheduling.fromObject(object.scheduling); - } - if (object.jobUuid != null) - message.jobUuid = String(object.jobUuid); - if (object.done != null) - message.done = Boolean(object.done); - return message; + GetJobRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); }; /** - * Creates a plain object from a Job message. Also converts values to other types if specified. - * @function toObject - * @memberof google.cloud.dataproc.v1.Job + * Verifies a GetJobRequest message. 
+ * @function verify + * @memberof google.cloud.dataproc.v1.GetJobRequest * @static - * @param {google.cloud.dataproc.v1.Job} message Job - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Job.toObject = function toObject(message, options) { - if (!options) - options = {}; - var object = {}; - if (options.arrays || options.defaults) { - object.yarnApplications = []; - object.statusHistory = []; - } - if (options.objects || options.defaults) - object.labels = {}; - if (options.defaults) { - object.reference = null; - object.placement = null; - object.status = null; - object.driverControlFilesUri = ""; - object.driverOutputResourceUri = ""; - object.scheduling = null; - object.jobUuid = ""; - object.done = false; - } - if (message.reference != null && message.hasOwnProperty("reference")) - object.reference = $root.google.cloud.dataproc.v1.JobReference.toObject(message.reference, options); - if (message.placement != null && message.hasOwnProperty("placement")) - object.placement = $root.google.cloud.dataproc.v1.JobPlacement.toObject(message.placement, options); - if (message.hadoopJob != null && message.hasOwnProperty("hadoopJob")) { - object.hadoopJob = $root.google.cloud.dataproc.v1.HadoopJob.toObject(message.hadoopJob, options); - if (options.oneofs) - object.typeJob = "hadoopJob"; - } - if (message.sparkJob != null && message.hasOwnProperty("sparkJob")) { - object.sparkJob = $root.google.cloud.dataproc.v1.SparkJob.toObject(message.sparkJob, options); - if (options.oneofs) - object.typeJob = "sparkJob"; - } - if (message.pysparkJob != null && message.hasOwnProperty("pysparkJob")) { - object.pysparkJob = $root.google.cloud.dataproc.v1.PySparkJob.toObject(message.pysparkJob, options); - if (options.oneofs) - object.typeJob = "pysparkJob"; - } - if (message.hiveJob != null && 
message.hasOwnProperty("hiveJob")) { - object.hiveJob = $root.google.cloud.dataproc.v1.HiveJob.toObject(message.hiveJob, options); - if (options.oneofs) - object.typeJob = "hiveJob"; - } - if (message.pigJob != null && message.hasOwnProperty("pigJob")) { - object.pigJob = $root.google.cloud.dataproc.v1.PigJob.toObject(message.pigJob, options); - if (options.oneofs) - object.typeJob = "pigJob"; - } - if (message.status != null && message.hasOwnProperty("status")) - object.status = $root.google.cloud.dataproc.v1.JobStatus.toObject(message.status, options); - if (message.yarnApplications && message.yarnApplications.length) { - object.yarnApplications = []; - for (var j = 0; j < message.yarnApplications.length; ++j) - object.yarnApplications[j] = $root.google.cloud.dataproc.v1.YarnApplication.toObject(message.yarnApplications[j], options); - } - if (message.sparkSqlJob != null && message.hasOwnProperty("sparkSqlJob")) { - object.sparkSqlJob = $root.google.cloud.dataproc.v1.SparkSqlJob.toObject(message.sparkSqlJob, options); - if (options.oneofs) - object.typeJob = "sparkSqlJob"; - } - if (message.statusHistory && message.statusHistory.length) { - object.statusHistory = []; - for (var j = 0; j < message.statusHistory.length; ++j) - object.statusHistory[j] = $root.google.cloud.dataproc.v1.JobStatus.toObject(message.statusHistory[j], options); - } - if (message.driverControlFilesUri != null && message.hasOwnProperty("driverControlFilesUri")) - object.driverControlFilesUri = message.driverControlFilesUri; - if (message.driverOutputResourceUri != null && message.hasOwnProperty("driverOutputResourceUri")) - object.driverOutputResourceUri = message.driverOutputResourceUri; - var keys2; - if (message.labels && (keys2 = Object.keys(message.labels)).length) { - object.labels = {}; - for (var j = 0; j < keys2.length; ++j) - object.labels[keys2[j]] = message.labels[keys2[j]]; - } - if (message.scheduling != null && message.hasOwnProperty("scheduling")) - object.scheduling = 
$root.google.cloud.dataproc.v1.JobScheduling.toObject(message.scheduling, options); - if (message.sparkRJob != null && message.hasOwnProperty("sparkRJob")) { - object.sparkRJob = $root.google.cloud.dataproc.v1.SparkRJob.toObject(message.sparkRJob, options); - if (options.oneofs) - object.typeJob = "sparkRJob"; - } - if (message.jobUuid != null && message.hasOwnProperty("jobUuid")) - object.jobUuid = message.jobUuid; - if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) { - object.prestoJob = $root.google.cloud.dataproc.v1.PrestoJob.toObject(message.prestoJob, options); - if (options.oneofs) - object.typeJob = "prestoJob"; + GetJobRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.projectId != null && message.hasOwnProperty("projectId")) + if (!$util.isString(message.projectId)) + return "projectId: string expected"; + if (message.region != null && message.hasOwnProperty("region")) + if (!$util.isString(message.region)) + return "region: string expected"; + if (message.jobId != null && message.hasOwnProperty("jobId")) + if (!$util.isString(message.jobId)) + return "jobId: string expected"; + return null; + }; + + /** + * Creates a GetJobRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.dataproc.v1.GetJobRequest + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.GetJobRequest} GetJobRequest + */ + GetJobRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.GetJobRequest) + return object; + var message = new $root.google.cloud.dataproc.v1.GetJobRequest(); + if (object.projectId != null) + message.projectId = String(object.projectId); + if (object.region != null) + message.region = String(object.region); + if (object.jobId != null) + message.jobId = String(object.jobId); + return message; + }; + + /** + * Creates a plain object from a GetJobRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.dataproc.v1.GetJobRequest + * @static + * @param {google.cloud.dataproc.v1.GetJobRequest} message GetJobRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GetJobRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.projectId = ""; + object.jobId = ""; + object.region = ""; } - if (message.done != null && message.hasOwnProperty("done")) - object.done = message.done; + if (message.projectId != null && message.hasOwnProperty("projectId")) + object.projectId = message.projectId; + if (message.jobId != null && message.hasOwnProperty("jobId")) + object.jobId = message.jobId; + if (message.region != null && message.hasOwnProperty("region")) + object.region = message.region; return object; }; /** - * Converts this Job to JSON. + * Converts this GetJobRequest to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.Job + * @memberof google.cloud.dataproc.v1.GetJobRequest * @instance * @returns {Object.} JSON object */ - Job.prototype.toJSON = function toJSON() { + GetJobRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Job + * Gets the default type url for GetJobRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.Job + * @memberof google.cloud.dataproc.v1.GetJobRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Job.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetJobRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.Job"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.GetJobRequest"; }; - return Job; + return GetJobRequest; })(); - v1.JobScheduling = (function() { + v1.ListJobsRequest = (function() { /** - * Properties of a JobScheduling. + * Properties of a ListJobsRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IJobScheduling - * @property {number|null} [maxFailuresPerHour] JobScheduling maxFailuresPerHour - * @property {number|null} [maxFailuresTotal] JobScheduling maxFailuresTotal + * @interface IListJobsRequest + * @property {string|null} [projectId] ListJobsRequest projectId + * @property {string|null} [region] ListJobsRequest region + * @property {number|null} [pageSize] ListJobsRequest pageSize + * @property {string|null} [pageToken] ListJobsRequest pageToken + * @property {string|null} [clusterName] ListJobsRequest clusterName + * @property {google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher|null} [jobStateMatcher] ListJobsRequest jobStateMatcher + * @property {string|null} [filter] ListJobsRequest filter */ /** - * Constructs a new JobScheduling. + * Constructs a new ListJobsRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a JobScheduling. - * @implements IJobScheduling + * @classdesc Represents a ListJobsRequest. + * @implements IListJobsRequest * @constructor - * @param {google.cloud.dataproc.v1.IJobScheduling=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IListJobsRequest=} [properties] Properties to set */ - function JobScheduling(properties) { + function ListJobsRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -28720,89 +30667,159 @@ } /** - * JobScheduling maxFailuresPerHour. - * @member {number} maxFailuresPerHour - * @memberof google.cloud.dataproc.v1.JobScheduling + * ListJobsRequest projectId. + * @member {string} projectId + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @instance */ - JobScheduling.prototype.maxFailuresPerHour = 0; + ListJobsRequest.prototype.projectId = ""; /** - * JobScheduling maxFailuresTotal. - * @member {number} maxFailuresTotal - * @memberof google.cloud.dataproc.v1.JobScheduling + * ListJobsRequest region. 
+ * @member {string} region + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @instance */ - JobScheduling.prototype.maxFailuresTotal = 0; + ListJobsRequest.prototype.region = ""; /** - * Creates a new JobScheduling instance using the specified properties. + * ListJobsRequest pageSize. + * @member {number} pageSize + * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @instance + */ + ListJobsRequest.prototype.pageSize = 0; + + /** + * ListJobsRequest pageToken. + * @member {string} pageToken + * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @instance + */ + ListJobsRequest.prototype.pageToken = ""; + + /** + * ListJobsRequest clusterName. + * @member {string} clusterName + * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @instance + */ + ListJobsRequest.prototype.clusterName = ""; + + /** + * ListJobsRequest jobStateMatcher. + * @member {google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher} jobStateMatcher + * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @instance + */ + ListJobsRequest.prototype.jobStateMatcher = 0; + + /** + * ListJobsRequest filter. + * @member {string} filter + * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @instance + */ + ListJobsRequest.prototype.filter = ""; + + /** + * Creates a new ListJobsRequest instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.JobScheduling + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @static - * @param {google.cloud.dataproc.v1.IJobScheduling=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.JobScheduling} JobScheduling instance + * @param {google.cloud.dataproc.v1.IListJobsRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.ListJobsRequest} ListJobsRequest instance */ - JobScheduling.create = function create(properties) { - return new JobScheduling(properties); + ListJobsRequest.create = function create(properties) { + return new ListJobsRequest(properties); }; /** - * Encodes the specified JobScheduling message. Does not implicitly {@link google.cloud.dataproc.v1.JobScheduling.verify|verify} messages. + * Encodes the specified ListJobsRequest message. Does not implicitly {@link google.cloud.dataproc.v1.ListJobsRequest.verify|verify} messages. * @function encode - * @memberof google.cloud.dataproc.v1.JobScheduling + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @static - * @param {google.cloud.dataproc.v1.IJobScheduling} message JobScheduling message or plain object to encode + * @param {google.cloud.dataproc.v1.IListJobsRequest} message ListJobsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - JobScheduling.encode = function encode(message, writer) { + ListJobsRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.maxFailuresPerHour != null && Object.hasOwnProperty.call(message, "maxFailuresPerHour")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.maxFailuresPerHour); - if (message.maxFailuresTotal != null && Object.hasOwnProperty.call(message, "maxFailuresTotal")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.maxFailuresTotal); + if (message.projectId != null && Object.hasOwnProperty.call(message, 
"projectId")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); + if (message.pageSize != null && Object.hasOwnProperty.call(message, "pageSize")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.pageSize); + if (message.pageToken != null && Object.hasOwnProperty.call(message, "pageToken")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.pageToken); + if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.clusterName); + if (message.jobStateMatcher != null && Object.hasOwnProperty.call(message, "jobStateMatcher")) + writer.uint32(/* id 5, wireType 0 =*/40).int32(message.jobStateMatcher); + if (message.region != null && Object.hasOwnProperty.call(message, "region")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.region); + if (message.filter != null && Object.hasOwnProperty.call(message, "filter")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.filter); return writer; }; /** - * Encodes the specified JobScheduling message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.JobScheduling.verify|verify} messages. + * Encodes the specified ListJobsRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ListJobsRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.JobScheduling + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @static - * @param {google.cloud.dataproc.v1.IJobScheduling} message JobScheduling message or plain object to encode + * @param {google.cloud.dataproc.v1.IListJobsRequest} message ListJobsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - JobScheduling.encodeDelimited = function encodeDelimited(message, writer) { + ListJobsRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a JobScheduling message from the specified reader or buffer. + * Decodes a ListJobsRequest message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.JobScheduling + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.JobScheduling} JobScheduling + * @returns {google.cloud.dataproc.v1.ListJobsRequest} ListJobsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - JobScheduling.decode = function decode(reader, length) { + ListJobsRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.JobScheduling(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ListJobsRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.maxFailuresPerHour = reader.int32(); + message.projectId = reader.string(); + break; + } + case 6: { + message.region = reader.string(); break; } case 2: { - message.maxFailuresTotal = reader.int32(); + message.pageSize = reader.int32(); + break; + } + case 3: { + message.pageToken = reader.string(); + break; + } + case 4: { + message.clusterName = reader.string(); + break; + } + case 5: { + message.jobStateMatcher = reader.int32(); + break; + } + case 7: { + message.filter = reader.string(); break; } default: @@ -28814,134 +30831,215 @@ }; /** - * Decodes a JobScheduling message from the specified reader or buffer, length delimited. + * Decodes a ListJobsRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.JobScheduling + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.JobScheduling} JobScheduling + * @returns {google.cloud.dataproc.v1.ListJobsRequest} ListJobsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - JobScheduling.decodeDelimited = function decodeDelimited(reader) { + ListJobsRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a JobScheduling message. + * Verifies a ListJobsRequest message. 
* @function verify - * @memberof google.cloud.dataproc.v1.JobScheduling + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - JobScheduling.verify = function verify(message) { + ListJobsRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.maxFailuresPerHour != null && message.hasOwnProperty("maxFailuresPerHour")) - if (!$util.isInteger(message.maxFailuresPerHour)) - return "maxFailuresPerHour: integer expected"; - if (message.maxFailuresTotal != null && message.hasOwnProperty("maxFailuresTotal")) - if (!$util.isInteger(message.maxFailuresTotal)) - return "maxFailuresTotal: integer expected"; + if (message.projectId != null && message.hasOwnProperty("projectId")) + if (!$util.isString(message.projectId)) + return "projectId: string expected"; + if (message.region != null && message.hasOwnProperty("region")) + if (!$util.isString(message.region)) + return "region: string expected"; + if (message.pageSize != null && message.hasOwnProperty("pageSize")) + if (!$util.isInteger(message.pageSize)) + return "pageSize: integer expected"; + if (message.pageToken != null && message.hasOwnProperty("pageToken")) + if (!$util.isString(message.pageToken)) + return "pageToken: string expected"; + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + if (!$util.isString(message.clusterName)) + return "clusterName: string expected"; + if (message.jobStateMatcher != null && message.hasOwnProperty("jobStateMatcher")) + switch (message.jobStateMatcher) { + default: + return "jobStateMatcher: enum value expected"; + case 0: + case 1: + case 2: + break; + } + if (message.filter != null && message.hasOwnProperty("filter")) + if (!$util.isString(message.filter)) + return "filter: string expected"; return null; }; /** - * Creates a JobScheduling 
message from a plain object. Also converts values to their respective internal types. + * Creates a ListJobsRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.JobScheduling + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.JobScheduling} JobScheduling + * @returns {google.cloud.dataproc.v1.ListJobsRequest} ListJobsRequest */ - JobScheduling.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.JobScheduling) + ListJobsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.ListJobsRequest) return object; - var message = new $root.google.cloud.dataproc.v1.JobScheduling(); - if (object.maxFailuresPerHour != null) - message.maxFailuresPerHour = object.maxFailuresPerHour | 0; - if (object.maxFailuresTotal != null) - message.maxFailuresTotal = object.maxFailuresTotal | 0; + var message = new $root.google.cloud.dataproc.v1.ListJobsRequest(); + if (object.projectId != null) + message.projectId = String(object.projectId); + if (object.region != null) + message.region = String(object.region); + if (object.pageSize != null) + message.pageSize = object.pageSize | 0; + if (object.pageToken != null) + message.pageToken = String(object.pageToken); + if (object.clusterName != null) + message.clusterName = String(object.clusterName); + switch (object.jobStateMatcher) { + default: + if (typeof object.jobStateMatcher === "number") { + message.jobStateMatcher = object.jobStateMatcher; + break; + } + break; + case "ALL": + case 0: + message.jobStateMatcher = 0; + break; + case "ACTIVE": + case 1: + message.jobStateMatcher = 1; + break; + case "NON_ACTIVE": + case 2: + message.jobStateMatcher = 2; + break; + } + if (object.filter != null) + message.filter = String(object.filter); return message; }; 
/** - * Creates a plain object from a JobScheduling message. Also converts values to other types if specified. + * Creates a plain object from a ListJobsRequest message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.JobScheduling + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @static - * @param {google.cloud.dataproc.v1.JobScheduling} message JobScheduling + * @param {google.cloud.dataproc.v1.ListJobsRequest} message ListJobsRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - JobScheduling.toObject = function toObject(message, options) { + ListJobsRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { - object.maxFailuresPerHour = 0; - object.maxFailuresTotal = 0; + object.projectId = ""; + object.pageSize = 0; + object.pageToken = ""; + object.clusterName = ""; + object.jobStateMatcher = options.enums === String ? "ALL" : 0; + object.region = ""; + object.filter = ""; } - if (message.maxFailuresPerHour != null && message.hasOwnProperty("maxFailuresPerHour")) - object.maxFailuresPerHour = message.maxFailuresPerHour; - if (message.maxFailuresTotal != null && message.hasOwnProperty("maxFailuresTotal")) - object.maxFailuresTotal = message.maxFailuresTotal; + if (message.projectId != null && message.hasOwnProperty("projectId")) + object.projectId = message.projectId; + if (message.pageSize != null && message.hasOwnProperty("pageSize")) + object.pageSize = message.pageSize; + if (message.pageToken != null && message.hasOwnProperty("pageToken")) + object.pageToken = message.pageToken; + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + object.clusterName = message.clusterName; + if (message.jobStateMatcher != null && message.hasOwnProperty("jobStateMatcher")) + object.jobStateMatcher = options.enums === String ? 
$root.google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher[message.jobStateMatcher] === undefined ? message.jobStateMatcher : $root.google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher[message.jobStateMatcher] : message.jobStateMatcher; + if (message.region != null && message.hasOwnProperty("region")) + object.region = message.region; + if (message.filter != null && message.hasOwnProperty("filter")) + object.filter = message.filter; return object; }; /** - * Converts this JobScheduling to JSON. + * Converts this ListJobsRequest to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.JobScheduling + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @instance * @returns {Object.} JSON object */ - JobScheduling.prototype.toJSON = function toJSON() { + ListJobsRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for JobScheduling + * Gets the default type url for ListJobsRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.JobScheduling + * @memberof google.cloud.dataproc.v1.ListJobsRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - JobScheduling.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ListJobsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.JobScheduling"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.ListJobsRequest"; }; - return JobScheduling; + /** + * JobStateMatcher enum. 
+ * @name google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher + * @enum {number} + * @property {number} ALL=0 ALL value + * @property {number} ACTIVE=1 ACTIVE value + * @property {number} NON_ACTIVE=2 NON_ACTIVE value + */ + ListJobsRequest.JobStateMatcher = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "ALL"] = 0; + values[valuesById[1] = "ACTIVE"] = 1; + values[valuesById[2] = "NON_ACTIVE"] = 2; + return values; + })(); + + return ListJobsRequest; })(); - v1.SubmitJobRequest = (function() { + v1.UpdateJobRequest = (function() { /** - * Properties of a SubmitJobRequest. + * Properties of an UpdateJobRequest. * @memberof google.cloud.dataproc.v1 - * @interface ISubmitJobRequest - * @property {string|null} [projectId] SubmitJobRequest projectId - * @property {string|null} [region] SubmitJobRequest region - * @property {google.cloud.dataproc.v1.IJob|null} [job] SubmitJobRequest job - * @property {string|null} [requestId] SubmitJobRequest requestId + * @interface IUpdateJobRequest + * @property {string|null} [projectId] UpdateJobRequest projectId + * @property {string|null} [region] UpdateJobRequest region + * @property {string|null} [jobId] UpdateJobRequest jobId + * @property {google.cloud.dataproc.v1.IJob|null} [job] UpdateJobRequest job + * @property {google.protobuf.IFieldMask|null} [updateMask] UpdateJobRequest updateMask */ /** - * Constructs a new SubmitJobRequest. + * Constructs a new UpdateJobRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a SubmitJobRequest. - * @implements ISubmitJobRequest + * @classdesc Represents an UpdateJobRequest. 
+ * @implements IUpdateJobRequest * @constructor - * @param {google.cloud.dataproc.v1.ISubmitJobRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IUpdateJobRequest=} [properties] Properties to set */ - function SubmitJobRequest(properties) { + function UpdateJobRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -28949,100 +31047,110 @@ } /** - * SubmitJobRequest projectId. + * UpdateJobRequest projectId. * @member {string} projectId - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @instance */ - SubmitJobRequest.prototype.projectId = ""; + UpdateJobRequest.prototype.projectId = ""; /** - * SubmitJobRequest region. + * UpdateJobRequest region. * @member {string} region - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @instance */ - SubmitJobRequest.prototype.region = ""; + UpdateJobRequest.prototype.region = ""; /** - * SubmitJobRequest job. + * UpdateJobRequest jobId. + * @member {string} jobId + * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @instance + */ + UpdateJobRequest.prototype.jobId = ""; + + /** + * UpdateJobRequest job. * @member {google.cloud.dataproc.v1.IJob|null|undefined} job - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @instance */ - SubmitJobRequest.prototype.job = null; + UpdateJobRequest.prototype.job = null; /** - * SubmitJobRequest requestId. - * @member {string} requestId - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * UpdateJobRequest updateMask. 
+ * @member {google.protobuf.IFieldMask|null|undefined} updateMask + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @instance */ - SubmitJobRequest.prototype.requestId = ""; + UpdateJobRequest.prototype.updateMask = null; /** - * Creates a new SubmitJobRequest instance using the specified properties. + * Creates a new UpdateJobRequest instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @static - * @param {google.cloud.dataproc.v1.ISubmitJobRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.SubmitJobRequest} SubmitJobRequest instance + * @param {google.cloud.dataproc.v1.IUpdateJobRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.UpdateJobRequest} UpdateJobRequest instance */ - SubmitJobRequest.create = function create(properties) { - return new SubmitJobRequest(properties); + UpdateJobRequest.create = function create(properties) { + return new UpdateJobRequest(properties); }; /** - * Encodes the specified SubmitJobRequest message. Does not implicitly {@link google.cloud.dataproc.v1.SubmitJobRequest.verify|verify} messages. + * Encodes the specified UpdateJobRequest message. Does not implicitly {@link google.cloud.dataproc.v1.UpdateJobRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @static - * @param {google.cloud.dataproc.v1.ISubmitJobRequest} message SubmitJobRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IUpdateJobRequest} message UpdateJobRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SubmitJobRequest.encode = function encode(message, writer) { + UpdateJobRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.job != null && Object.hasOwnProperty.call(message, "job")) - $root.google.cloud.dataproc.v1.Job.encode(message.job, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); - if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.requestId); + writer.uint32(/* id 2, wireType 2 =*/18).string(message.region); + if (message.jobId != null && Object.hasOwnProperty.call(message, "jobId")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.jobId); + if (message.job != null && Object.hasOwnProperty.call(message, "job")) + $root.google.cloud.dataproc.v1.Job.encode(message.job, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.updateMask != null && Object.hasOwnProperty.call(message, "updateMask")) + $root.google.protobuf.FieldMask.encode(message.updateMask, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); return writer; }; /** - * Encodes the specified SubmitJobRequest message, length delimited. 
Does not implicitly {@link google.cloud.dataproc.v1.SubmitJobRequest.verify|verify} messages. + * Encodes the specified UpdateJobRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.UpdateJobRequest.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @static - * @param {google.cloud.dataproc.v1.ISubmitJobRequest} message SubmitJobRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IUpdateJobRequest} message UpdateJobRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SubmitJobRequest.encodeDelimited = function encodeDelimited(message, writer) { + UpdateJobRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SubmitJobRequest message from the specified reader or buffer. + * Decodes an UpdateJobRequest message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.SubmitJobRequest} SubmitJobRequest + * @returns {google.cloud.dataproc.v1.UpdateJobRequest} UpdateJobRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SubmitJobRequest.decode = function decode(reader, length) { + UpdateJobRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.SubmitJobRequest(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.UpdateJobRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { @@ -29050,16 +31158,20 @@ message.projectId = reader.string(); break; } - case 3: { + case 2: { message.region = reader.string(); break; } - case 2: { - message.job = $root.google.cloud.dataproc.v1.Job.decode(reader, reader.uint32()); + case 3: { + message.jobId = reader.string(); break; } case 4: { - message.requestId = reader.string(); + message.job = $root.google.cloud.dataproc.v1.Job.decode(reader, reader.uint32()); + break; + } + case 5: { + message.updateMask = $root.google.protobuf.FieldMask.decode(reader, reader.uint32()); break; } default: @@ -29071,30 +31183,30 @@ }; /** - * Decodes a SubmitJobRequest message from the specified reader or buffer, length delimited. + * Decodes an UpdateJobRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.SubmitJobRequest} SubmitJobRequest + * @returns {google.cloud.dataproc.v1.UpdateJobRequest} UpdateJobRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SubmitJobRequest.decodeDelimited = function decodeDelimited(reader) { + UpdateJobRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SubmitJobRequest message. + * Verifies an UpdateJobRequest message. 
* @function verify - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SubmitJobRequest.verify = function verify(message) { + UpdateJobRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.projectId != null && message.hasOwnProperty("projectId")) @@ -29103,123 +31215,135 @@ if (message.region != null && message.hasOwnProperty("region")) if (!$util.isString(message.region)) return "region: string expected"; + if (message.jobId != null && message.hasOwnProperty("jobId")) + if (!$util.isString(message.jobId)) + return "jobId: string expected"; if (message.job != null && message.hasOwnProperty("job")) { var error = $root.google.cloud.dataproc.v1.Job.verify(message.job); if (error) return "job." + error; } - if (message.requestId != null && message.hasOwnProperty("requestId")) - if (!$util.isString(message.requestId)) - return "requestId: string expected"; + if (message.updateMask != null && message.hasOwnProperty("updateMask")) { + var error = $root.google.protobuf.FieldMask.verify(message.updateMask); + if (error) + return "updateMask." + error; + } return null; }; /** - * Creates a SubmitJobRequest message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateJobRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.SubmitJobRequest} SubmitJobRequest + * @returns {google.cloud.dataproc.v1.UpdateJobRequest} UpdateJobRequest */ - SubmitJobRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.SubmitJobRequest) + UpdateJobRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.UpdateJobRequest) return object; - var message = new $root.google.cloud.dataproc.v1.SubmitJobRequest(); + var message = new $root.google.cloud.dataproc.v1.UpdateJobRequest(); if (object.projectId != null) message.projectId = String(object.projectId); if (object.region != null) message.region = String(object.region); + if (object.jobId != null) + message.jobId = String(object.jobId); if (object.job != null) { if (typeof object.job !== "object") - throw TypeError(".google.cloud.dataproc.v1.SubmitJobRequest.job: object expected"); + throw TypeError(".google.cloud.dataproc.v1.UpdateJobRequest.job: object expected"); message.job = $root.google.cloud.dataproc.v1.Job.fromObject(object.job); } - if (object.requestId != null) - message.requestId = String(object.requestId); + if (object.updateMask != null) { + if (typeof object.updateMask !== "object") + throw TypeError(".google.cloud.dataproc.v1.UpdateJobRequest.updateMask: object expected"); + message.updateMask = $root.google.protobuf.FieldMask.fromObject(object.updateMask); + } return message; }; /** - * Creates a plain object from a SubmitJobRequest message. Also converts values to other types if specified. + * Creates a plain object from an UpdateJobRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @static - * @param {google.cloud.dataproc.v1.SubmitJobRequest} message SubmitJobRequest + * @param {google.cloud.dataproc.v1.UpdateJobRequest} message UpdateJobRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SubmitJobRequest.toObject = function toObject(message, options) { + UpdateJobRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { object.projectId = ""; - object.job = null; object.region = ""; - object.requestId = ""; + object.jobId = ""; + object.job = null; + object.updateMask = null; } if (message.projectId != null && message.hasOwnProperty("projectId")) object.projectId = message.projectId; - if (message.job != null && message.hasOwnProperty("job")) - object.job = $root.google.cloud.dataproc.v1.Job.toObject(message.job, options); if (message.region != null && message.hasOwnProperty("region")) object.region = message.region; - if (message.requestId != null && message.hasOwnProperty("requestId")) - object.requestId = message.requestId; + if (message.jobId != null && message.hasOwnProperty("jobId")) + object.jobId = message.jobId; + if (message.job != null && message.hasOwnProperty("job")) + object.job = $root.google.cloud.dataproc.v1.Job.toObject(message.job, options); + if (message.updateMask != null && message.hasOwnProperty("updateMask")) + object.updateMask = $root.google.protobuf.FieldMask.toObject(message.updateMask, options); return object; }; /** - * Converts this SubmitJobRequest to JSON. + * Converts this UpdateJobRequest to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @instance * @returns {Object.} JSON object */ - SubmitJobRequest.prototype.toJSON = function toJSON() { + UpdateJobRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SubmitJobRequest + * Gets the default type url for UpdateJobRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.SubmitJobRequest + * @memberof google.cloud.dataproc.v1.UpdateJobRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SubmitJobRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + UpdateJobRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.SubmitJobRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.UpdateJobRequest"; }; - return SubmitJobRequest; + return UpdateJobRequest; })(); - v1.JobMetadata = (function() { + v1.ListJobsResponse = (function() { /** - * Properties of a JobMetadata. + * Properties of a ListJobsResponse. * @memberof google.cloud.dataproc.v1 - * @interface IJobMetadata - * @property {string|null} [jobId] JobMetadata jobId - * @property {google.cloud.dataproc.v1.IJobStatus|null} [status] JobMetadata status - * @property {string|null} [operationType] JobMetadata operationType - * @property {google.protobuf.ITimestamp|null} [startTime] JobMetadata startTime + * @interface IListJobsResponse + * @property {Array.|null} [jobs] ListJobsResponse jobs + * @property {string|null} [nextPageToken] ListJobsResponse nextPageToken */ /** - * Constructs a new JobMetadata. + * Constructs a new ListJobsResponse. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a JobMetadata. 
- * @implements IJobMetadata + * @classdesc Represents a ListJobsResponse. + * @implements IListJobsResponse * @constructor - * @param {google.cloud.dataproc.v1.IJobMetadata=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IListJobsResponse=} [properties] Properties to set */ - function JobMetadata(properties) { + function ListJobsResponse(properties) { + this.jobs = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -29227,117 +31351,92 @@ } /** - * JobMetadata jobId. - * @member {string} jobId - * @memberof google.cloud.dataproc.v1.JobMetadata - * @instance - */ - JobMetadata.prototype.jobId = ""; - - /** - * JobMetadata status. - * @member {google.cloud.dataproc.v1.IJobStatus|null|undefined} status - * @memberof google.cloud.dataproc.v1.JobMetadata - * @instance - */ - JobMetadata.prototype.status = null; - - /** - * JobMetadata operationType. - * @member {string} operationType - * @memberof google.cloud.dataproc.v1.JobMetadata + * ListJobsResponse jobs. + * @member {Array.} jobs + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @instance */ - JobMetadata.prototype.operationType = ""; + ListJobsResponse.prototype.jobs = $util.emptyArray; /** - * JobMetadata startTime. - * @member {google.protobuf.ITimestamp|null|undefined} startTime - * @memberof google.cloud.dataproc.v1.JobMetadata + * ListJobsResponse nextPageToken. + * @member {string} nextPageToken + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @instance */ - JobMetadata.prototype.startTime = null; + ListJobsResponse.prototype.nextPageToken = ""; /** - * Creates a new JobMetadata instance using the specified properties. + * Creates a new ListJobsResponse instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.JobMetadata + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @static - * @param {google.cloud.dataproc.v1.IJobMetadata=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata instance + * @param {google.cloud.dataproc.v1.IListJobsResponse=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.ListJobsResponse} ListJobsResponse instance */ - JobMetadata.create = function create(properties) { - return new JobMetadata(properties); + ListJobsResponse.create = function create(properties) { + return new ListJobsResponse(properties); }; /** - * Encodes the specified JobMetadata message. Does not implicitly {@link google.cloud.dataproc.v1.JobMetadata.verify|verify} messages. + * Encodes the specified ListJobsResponse message. Does not implicitly {@link google.cloud.dataproc.v1.ListJobsResponse.verify|verify} messages. * @function encode - * @memberof google.cloud.dataproc.v1.JobMetadata + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @static - * @param {google.cloud.dataproc.v1.IJobMetadata} message JobMetadata message or plain object to encode + * @param {google.cloud.dataproc.v1.IListJobsResponse} message ListJobsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - JobMetadata.encode = function encode(message, writer) { + ListJobsResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.jobId != null && Object.hasOwnProperty.call(message, "jobId")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.jobId); - if (message.status != null && Object.hasOwnProperty.call(message, "status")) - $root.google.cloud.dataproc.v1.JobStatus.encode(message.status, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.operationType != null && Object.hasOwnProperty.call(message, 
"operationType")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.operationType); - if (message.startTime != null && Object.hasOwnProperty.call(message, "startTime")) - $root.google.protobuf.Timestamp.encode(message.startTime, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.jobs != null && message.jobs.length) + for (var i = 0; i < message.jobs.length; ++i) + $root.google.cloud.dataproc.v1.Job.encode(message.jobs[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.nextPageToken != null && Object.hasOwnProperty.call(message, "nextPageToken")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.nextPageToken); return writer; }; /** - * Encodes the specified JobMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.JobMetadata.verify|verify} messages. + * Encodes the specified ListJobsResponse message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ListJobsResponse.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.JobMetadata + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @static - * @param {google.cloud.dataproc.v1.IJobMetadata} message JobMetadata message or plain object to encode + * @param {google.cloud.dataproc.v1.IListJobsResponse} message ListJobsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - JobMetadata.encodeDelimited = function encodeDelimited(message, writer) { + ListJobsResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a JobMetadata message from the specified reader or buffer. + * Decodes a ListJobsResponse message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.JobMetadata + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata + * @returns {google.cloud.dataproc.v1.ListJobsResponse} ListJobsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - JobMetadata.decode = function decode(reader, length) { + ListJobsResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.JobMetadata(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ListJobsResponse(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.jobId = reader.string(); + if (!(message.jobs && message.jobs.length)) + message.jobs = []; + message.jobs.push($root.google.cloud.dataproc.v1.Job.decode(reader, reader.uint32())); break; } case 2: { - message.status = $root.google.cloud.dataproc.v1.JobStatus.decode(reader, reader.uint32()); - break; - } - case 3: { - message.operationType = reader.string(); - break; - } - case 4: { - message.startTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + message.nextPageToken = reader.string(); break; } default: @@ -29349,159 +31448,150 @@ }; /** - * Decodes a JobMetadata message from the specified reader or buffer, length delimited. + * Decodes a ListJobsResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.JobMetadata + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata + * @returns {google.cloud.dataproc.v1.ListJobsResponse} ListJobsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - JobMetadata.decodeDelimited = function decodeDelimited(reader) { + ListJobsResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a JobMetadata message. + * Verifies a ListJobsResponse message. * @function verify - * @memberof google.cloud.dataproc.v1.JobMetadata + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - JobMetadata.verify = function verify(message) { + ListJobsResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.jobId != null && message.hasOwnProperty("jobId")) - if (!$util.isString(message.jobId)) - return "jobId: string expected"; - if (message.status != null && message.hasOwnProperty("status")) { - var error = $root.google.cloud.dataproc.v1.JobStatus.verify(message.status); - if (error) - return "status." + error; - } - if (message.operationType != null && message.hasOwnProperty("operationType")) - if (!$util.isString(message.operationType)) - return "operationType: string expected"; - if (message.startTime != null && message.hasOwnProperty("startTime")) { - var error = $root.google.protobuf.Timestamp.verify(message.startTime); - if (error) - return "startTime." 
+ error; + if (message.jobs != null && message.hasOwnProperty("jobs")) { + if (!Array.isArray(message.jobs)) + return "jobs: array expected"; + for (var i = 0; i < message.jobs.length; ++i) { + var error = $root.google.cloud.dataproc.v1.Job.verify(message.jobs[i]); + if (error) + return "jobs." + error; + } } + if (message.nextPageToken != null && message.hasOwnProperty("nextPageToken")) + if (!$util.isString(message.nextPageToken)) + return "nextPageToken: string expected"; return null; }; /** - * Creates a JobMetadata message from a plain object. Also converts values to their respective internal types. + * Creates a ListJobsResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.JobMetadata + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata + * @returns {google.cloud.dataproc.v1.ListJobsResponse} ListJobsResponse */ - JobMetadata.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.JobMetadata) - return object; - var message = new $root.google.cloud.dataproc.v1.JobMetadata(); - if (object.jobId != null) - message.jobId = String(object.jobId); - if (object.status != null) { - if (typeof object.status !== "object") - throw TypeError(".google.cloud.dataproc.v1.JobMetadata.status: object expected"); - message.status = $root.google.cloud.dataproc.v1.JobStatus.fromObject(object.status); - } - if (object.operationType != null) - message.operationType = String(object.operationType); - if (object.startTime != null) { - if (typeof object.startTime !== "object") - throw TypeError(".google.cloud.dataproc.v1.JobMetadata.startTime: object expected"); - message.startTime = $root.google.protobuf.Timestamp.fromObject(object.startTime); + ListJobsResponse.fromObject = function fromObject(object) { + if (object instanceof 
$root.google.cloud.dataproc.v1.ListJobsResponse) + return object; + var message = new $root.google.cloud.dataproc.v1.ListJobsResponse(); + if (object.jobs) { + if (!Array.isArray(object.jobs)) + throw TypeError(".google.cloud.dataproc.v1.ListJobsResponse.jobs: array expected"); + message.jobs = []; + for (var i = 0; i < object.jobs.length; ++i) { + if (typeof object.jobs[i] !== "object") + throw TypeError(".google.cloud.dataproc.v1.ListJobsResponse.jobs: object expected"); + message.jobs[i] = $root.google.cloud.dataproc.v1.Job.fromObject(object.jobs[i]); + } } + if (object.nextPageToken != null) + message.nextPageToken = String(object.nextPageToken); return message; }; /** - * Creates a plain object from a JobMetadata message. Also converts values to other types if specified. + * Creates a plain object from a ListJobsResponse message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.JobMetadata + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @static - * @param {google.cloud.dataproc.v1.JobMetadata} message JobMetadata + * @param {google.cloud.dataproc.v1.ListJobsResponse} message ListJobsResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - JobMetadata.toObject = function toObject(message, options) { + ListJobsResponse.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.defaults) { - object.jobId = ""; - object.status = null; - object.operationType = ""; - object.startTime = null; + if (options.arrays || options.defaults) + object.jobs = []; + if (options.defaults) + object.nextPageToken = ""; + if (message.jobs && message.jobs.length) { + object.jobs = []; + for (var j = 0; j < message.jobs.length; ++j) + object.jobs[j] = $root.google.cloud.dataproc.v1.Job.toObject(message.jobs[j], options); } - if (message.jobId != null && message.hasOwnProperty("jobId")) - object.jobId = 
message.jobId; - if (message.status != null && message.hasOwnProperty("status")) - object.status = $root.google.cloud.dataproc.v1.JobStatus.toObject(message.status, options); - if (message.operationType != null && message.hasOwnProperty("operationType")) - object.operationType = message.operationType; - if (message.startTime != null && message.hasOwnProperty("startTime")) - object.startTime = $root.google.protobuf.Timestamp.toObject(message.startTime, options); + if (message.nextPageToken != null && message.hasOwnProperty("nextPageToken")) + object.nextPageToken = message.nextPageToken; return object; }; /** - * Converts this JobMetadata to JSON. + * Converts this ListJobsResponse to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.JobMetadata + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @instance * @returns {Object.} JSON object */ - JobMetadata.prototype.toJSON = function toJSON() { + ListJobsResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for JobMetadata + * Gets the default type url for ListJobsResponse * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.JobMetadata + * @memberof google.cloud.dataproc.v1.ListJobsResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - JobMetadata.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ListJobsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.JobMetadata"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.ListJobsResponse"; }; - return JobMetadata; + return ListJobsResponse; })(); - v1.GetJobRequest = (function() { + v1.CancelJobRequest = (function() { /** - * Properties of a GetJobRequest. + * Properties of a CancelJobRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IGetJobRequest - * @property {string|null} [projectId] GetJobRequest projectId - * @property {string|null} [region] GetJobRequest region - * @property {string|null} [jobId] GetJobRequest jobId + * @interface ICancelJobRequest + * @property {string|null} [projectId] CancelJobRequest projectId + * @property {string|null} [region] CancelJobRequest region + * @property {string|null} [jobId] CancelJobRequest jobId */ /** - * Constructs a new GetJobRequest. + * Constructs a new CancelJobRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a GetJobRequest. - * @implements IGetJobRequest + * @classdesc Represents a CancelJobRequest. + * @implements ICancelJobRequest * @constructor - * @param {google.cloud.dataproc.v1.IGetJobRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.ICancelJobRequest=} [properties] Properties to set */ - function GetJobRequest(properties) { + function CancelJobRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -29509,51 +31599,51 @@ } /** - * GetJobRequest projectId. + * CancelJobRequest projectId. * @member {string} projectId - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @instance */ - GetJobRequest.prototype.projectId = ""; + CancelJobRequest.prototype.projectId = ""; /** - * GetJobRequest region. + * CancelJobRequest region. * @member {string} region - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @instance */ - GetJobRequest.prototype.region = ""; + CancelJobRequest.prototype.region = ""; /** - * GetJobRequest jobId. + * CancelJobRequest jobId. 
* @member {string} jobId - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @instance */ - GetJobRequest.prototype.jobId = ""; + CancelJobRequest.prototype.jobId = ""; /** - * Creates a new GetJobRequest instance using the specified properties. + * Creates a new CancelJobRequest instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @static - * @param {google.cloud.dataproc.v1.IGetJobRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.GetJobRequest} GetJobRequest instance + * @param {google.cloud.dataproc.v1.ICancelJobRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.CancelJobRequest} CancelJobRequest instance */ - GetJobRequest.create = function create(properties) { - return new GetJobRequest(properties); + CancelJobRequest.create = function create(properties) { + return new CancelJobRequest(properties); }; /** - * Encodes the specified GetJobRequest message. Does not implicitly {@link google.cloud.dataproc.v1.GetJobRequest.verify|verify} messages. + * Encodes the specified CancelJobRequest message. Does not implicitly {@link google.cloud.dataproc.v1.CancelJobRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @static - * @param {google.cloud.dataproc.v1.IGetJobRequest} message GetJobRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.ICancelJobRequest} message CancelJobRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetJobRequest.encode = function encode(message, writer) { + CancelJobRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) @@ -29566,33 +31656,33 @@ }; /** - * Encodes the specified GetJobRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.GetJobRequest.verify|verify} messages. + * Encodes the specified CancelJobRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.CancelJobRequest.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @static - * @param {google.cloud.dataproc.v1.IGetJobRequest} message GetJobRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.ICancelJobRequest} message CancelJobRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetJobRequest.encodeDelimited = function encodeDelimited(message, writer) { + CancelJobRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetJobRequest message from the specified reader or buffer. + * Decodes a CancelJobRequest message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.GetJobRequest} GetJobRequest + * @returns {google.cloud.dataproc.v1.CancelJobRequest} CancelJobRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetJobRequest.decode = function decode(reader, length) { + CancelJobRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.GetJobRequest(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.CancelJobRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { @@ -29617,30 +31707,30 @@ }; /** - * Decodes a GetJobRequest message from the specified reader or buffer, length delimited. + * Decodes a CancelJobRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.GetJobRequest} GetJobRequest + * @returns {google.cloud.dataproc.v1.CancelJobRequest} CancelJobRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetJobRequest.decodeDelimited = function decodeDelimited(reader) { + CancelJobRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetJobRequest message. + * Verifies a CancelJobRequest message. * @function verify - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetJobRequest.verify = function verify(message) { + CancelJobRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.projectId != null && message.hasOwnProperty("projectId")) @@ -29656,17 +31746,17 @@ }; /** - * Creates a GetJobRequest message from a plain object. Also converts values to their respective internal types. + * Creates a CancelJobRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.GetJobRequest} GetJobRequest + * @returns {google.cloud.dataproc.v1.CancelJobRequest} CancelJobRequest */ - GetJobRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.GetJobRequest) + CancelJobRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.CancelJobRequest) return object; - var message = new $root.google.cloud.dataproc.v1.GetJobRequest(); + var message = new $root.google.cloud.dataproc.v1.CancelJobRequest(); if (object.projectId != null) message.projectId = String(object.projectId); if (object.region != null) @@ -29677,15 +31767,15 @@ }; /** - * Creates a plain object from a GetJobRequest message. Also converts values to other types if specified. + * Creates a plain object from a CancelJobRequest message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @static - * @param {google.cloud.dataproc.v1.GetJobRequest} message GetJobRequest + * @param {google.cloud.dataproc.v1.CancelJobRequest} message CancelJobRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetJobRequest.toObject = function toObject(message, options) { + CancelJobRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; @@ -29704,58 +31794,54 @@ }; /** - * Converts this GetJobRequest to JSON. + * Converts this CancelJobRequest to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @instance * @returns {Object.} JSON object */ - GetJobRequest.prototype.toJSON = function toJSON() { + CancelJobRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetJobRequest + * Gets the default type url for CancelJobRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.GetJobRequest + * @memberof google.cloud.dataproc.v1.CancelJobRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetJobRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CancelJobRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.GetJobRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.CancelJobRequest"; }; - return GetJobRequest; + return CancelJobRequest; })(); - v1.ListJobsRequest = (function() { + v1.DeleteJobRequest = (function() { /** - * Properties of a ListJobsRequest. + * Properties of a DeleteJobRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IListJobsRequest - * @property {string|null} [projectId] ListJobsRequest projectId - * @property {string|null} [region] ListJobsRequest region - * @property {number|null} [pageSize] ListJobsRequest pageSize - * @property {string|null} [pageToken] ListJobsRequest pageToken - * @property {string|null} [clusterName] ListJobsRequest clusterName - * @property {google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher|null} [jobStateMatcher] ListJobsRequest jobStateMatcher - * @property {string|null} [filter] ListJobsRequest filter + * @interface IDeleteJobRequest + * @property {string|null} [projectId] DeleteJobRequest projectId + * @property {string|null} [region] DeleteJobRequest region + * @property {string|null} [jobId] DeleteJobRequest jobId */ /** - * Constructs a new ListJobsRequest. + * Constructs a new DeleteJobRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a ListJobsRequest. - * @implements IListJobsRequest + * @classdesc Represents a DeleteJobRequest. + * @implements IDeleteJobRequest * @constructor - * @param {google.cloud.dataproc.v1.IListJobsRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IDeleteJobRequest=} [properties] Properties to set */ - function ListJobsRequest(properties) { + function DeleteJobRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -29763,130 +31849,90 @@ } /** - * ListJobsRequest projectId. + * DeleteJobRequest projectId. * @member {string} projectId - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @memberof google.cloud.dataproc.v1.DeleteJobRequest * @instance */ - ListJobsRequest.prototype.projectId = ""; + DeleteJobRequest.prototype.projectId = ""; /** - * ListJobsRequest region. + * DeleteJobRequest region. 
* @member {string} region - * @memberof google.cloud.dataproc.v1.ListJobsRequest - * @instance - */ - ListJobsRequest.prototype.region = ""; - - /** - * ListJobsRequest pageSize. - * @member {number} pageSize - * @memberof google.cloud.dataproc.v1.ListJobsRequest - * @instance - */ - ListJobsRequest.prototype.pageSize = 0; - - /** - * ListJobsRequest pageToken. - * @member {string} pageToken - * @memberof google.cloud.dataproc.v1.ListJobsRequest - * @instance - */ - ListJobsRequest.prototype.pageToken = ""; - - /** - * ListJobsRequest clusterName. - * @member {string} clusterName - * @memberof google.cloud.dataproc.v1.ListJobsRequest - * @instance - */ - ListJobsRequest.prototype.clusterName = ""; - - /** - * ListJobsRequest jobStateMatcher. - * @member {google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher} jobStateMatcher - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @memberof google.cloud.dataproc.v1.DeleteJobRequest * @instance */ - ListJobsRequest.prototype.jobStateMatcher = 0; + DeleteJobRequest.prototype.region = ""; /** - * ListJobsRequest filter. - * @member {string} filter - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * DeleteJobRequest jobId. + * @member {string} jobId + * @memberof google.cloud.dataproc.v1.DeleteJobRequest * @instance */ - ListJobsRequest.prototype.filter = ""; + DeleteJobRequest.prototype.jobId = ""; /** - * Creates a new ListJobsRequest instance using the specified properties. + * Creates a new DeleteJobRequest instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @memberof google.cloud.dataproc.v1.DeleteJobRequest * @static - * @param {google.cloud.dataproc.v1.IListJobsRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.ListJobsRequest} ListJobsRequest instance + * @param {google.cloud.dataproc.v1.IDeleteJobRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.DeleteJobRequest} DeleteJobRequest instance */ - ListJobsRequest.create = function create(properties) { - return new ListJobsRequest(properties); + DeleteJobRequest.create = function create(properties) { + return new DeleteJobRequest(properties); }; /** - * Encodes the specified ListJobsRequest message. Does not implicitly {@link google.cloud.dataproc.v1.ListJobsRequest.verify|verify} messages. + * Encodes the specified DeleteJobRequest message. Does not implicitly {@link google.cloud.dataproc.v1.DeleteJobRequest.verify|verify} messages. * @function encode - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @memberof google.cloud.dataproc.v1.DeleteJobRequest * @static - * @param {google.cloud.dataproc.v1.IListJobsRequest} message ListJobsRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IDeleteJobRequest} message DeleteJobRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ListJobsRequest.encode = function encode(message, writer) { + DeleteJobRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.pageSize != null && Object.hasOwnProperty.call(message, "pageSize")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.pageSize); - if (message.pageToken != null && Object.hasOwnProperty.call(message, "pageToken")) - 
writer.uint32(/* id 3, wireType 2 =*/26).string(message.pageToken); - if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.clusterName); - if (message.jobStateMatcher != null && Object.hasOwnProperty.call(message, "jobStateMatcher")) - writer.uint32(/* id 5, wireType 0 =*/40).int32(message.jobStateMatcher); + if (message.jobId != null && Object.hasOwnProperty.call(message, "jobId")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.jobId); if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.region); - if (message.filter != null && Object.hasOwnProperty.call(message, "filter")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.filter); + writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); return writer; }; /** - * Encodes the specified ListJobsRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ListJobsRequest.verify|verify} messages. + * Encodes the specified DeleteJobRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DeleteJobRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @memberof google.cloud.dataproc.v1.DeleteJobRequest * @static - * @param {google.cloud.dataproc.v1.IListJobsRequest} message ListJobsRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IDeleteJobRequest} message DeleteJobRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ListJobsRequest.encodeDelimited = function encodeDelimited(message, writer) { + DeleteJobRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ListJobsRequest message from the specified reader or buffer. + * Decodes a DeleteJobRequest message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @memberof google.cloud.dataproc.v1.DeleteJobRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.ListJobsRequest} ListJobsRequest + * @returns {google.cloud.dataproc.v1.DeleteJobRequest} DeleteJobRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListJobsRequest.decode = function decode(reader, length) { + DeleteJobRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ListJobsRequest(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DeleteJobRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { @@ -29894,28 +31940,12 @@ message.projectId = reader.string(); break; } - case 6: { + case 3: { message.region = reader.string(); break; } case 2: { - message.pageSize = reader.int32(); - break; - } - case 3: { - message.pageToken = reader.string(); - break; - } - case 4: { - message.clusterName = reader.string(); - break; - } - case 5: { - message.jobStateMatcher = reader.int32(); - break; - } - case 7: { - message.filter = reader.string(); + message.jobId = reader.string(); break; } default: @@ -29927,30 +31957,30 @@ }; /** - * Decodes a ListJobsRequest message from the specified reader or buffer, length delimited. + * Decodes a DeleteJobRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @memberof google.cloud.dataproc.v1.DeleteJobRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.ListJobsRequest} ListJobsRequest + * @returns {google.cloud.dataproc.v1.DeleteJobRequest} DeleteJobRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListJobsRequest.decodeDelimited = function decodeDelimited(reader) { + DeleteJobRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ListJobsRequest message. + * Verifies a DeleteJobRequest message. 
* @function verify - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @memberof google.cloud.dataproc.v1.DeleteJobRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ListJobsRequest.verify = function verify(message) { + DeleteJobRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.projectId != null && message.hasOwnProperty("projectId")) @@ -29959,183 +31989,244 @@ if (message.region != null && message.hasOwnProperty("region")) if (!$util.isString(message.region)) return "region: string expected"; - if (message.pageSize != null && message.hasOwnProperty("pageSize")) - if (!$util.isInteger(message.pageSize)) - return "pageSize: integer expected"; - if (message.pageToken != null && message.hasOwnProperty("pageToken")) - if (!$util.isString(message.pageToken)) - return "pageToken: string expected"; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - if (!$util.isString(message.clusterName)) - return "clusterName: string expected"; - if (message.jobStateMatcher != null && message.hasOwnProperty("jobStateMatcher")) - switch (message.jobStateMatcher) { - default: - return "jobStateMatcher: enum value expected"; - case 0: - case 1: - case 2: - break; - } - if (message.filter != null && message.hasOwnProperty("filter")) - if (!$util.isString(message.filter)) - return "filter: string expected"; + if (message.jobId != null && message.hasOwnProperty("jobId")) + if (!$util.isString(message.jobId)) + return "jobId: string expected"; return null; }; /** - * Creates a ListJobsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a DeleteJobRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * @memberof google.cloud.dataproc.v1.DeleteJobRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.ListJobsRequest} ListJobsRequest + * @returns {google.cloud.dataproc.v1.DeleteJobRequest} DeleteJobRequest */ - ListJobsRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.ListJobsRequest) + DeleteJobRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.DeleteJobRequest) return object; - var message = new $root.google.cloud.dataproc.v1.ListJobsRequest(); + var message = new $root.google.cloud.dataproc.v1.DeleteJobRequest(); if (object.projectId != null) message.projectId = String(object.projectId); if (object.region != null) message.region = String(object.region); - if (object.pageSize != null) - message.pageSize = object.pageSize | 0; - if (object.pageToken != null) - message.pageToken = String(object.pageToken); - if (object.clusterName != null) - message.clusterName = String(object.clusterName); - switch (object.jobStateMatcher) { - default: - if (typeof object.jobStateMatcher === "number") { - message.jobStateMatcher = object.jobStateMatcher; - break; - } - break; - case "ALL": - case 0: - message.jobStateMatcher = 0; - break; - case "ACTIVE": - case 1: - message.jobStateMatcher = 1; - break; - case "NON_ACTIVE": - case 2: - message.jobStateMatcher = 2; - break; - } - if (object.filter != null) - message.filter = String(object.filter); + if (object.jobId != null) + message.jobId = String(object.jobId); return message; }; /** - * Creates a plain object from a ListJobsRequest message. Also converts values to other types if specified. 
- * @function toObject - * @memberof google.cloud.dataproc.v1.ListJobsRequest - * @static - * @param {google.cloud.dataproc.v1.ListJobsRequest} message ListJobsRequest - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object + * Creates a plain object from a DeleteJobRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @static + * @param {google.cloud.dataproc.v1.DeleteJobRequest} message DeleteJobRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteJobRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.projectId = ""; + object.jobId = ""; + object.region = ""; + } + if (message.projectId != null && message.hasOwnProperty("projectId")) + object.projectId = message.projectId; + if (message.jobId != null && message.hasOwnProperty("jobId")) + object.jobId = message.jobId; + if (message.region != null && message.hasOwnProperty("region")) + object.region = message.region; + return object; + }; + + /** + * Converts this DeleteJobRequest to JSON. 
+ * @function toJSON + * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @instance + * @returns {Object.} JSON object + */ + DeleteJobRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteJobRequest + * @function getTypeUrl + * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteJobRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/google.cloud.dataproc.v1.DeleteJobRequest"; + }; + + return DeleteJobRequest; + })(); + + v1.NodeGroupController = (function() { + + /** + * Constructs a new NodeGroupController service. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a NodeGroupController + * @extends $protobuf.rpc.Service + * @constructor + * @param {$protobuf.RPCImpl} rpcImpl RPC implementation + * @param {boolean} [requestDelimited=false] Whether requests are length-delimited + * @param {boolean} [responseDelimited=false] Whether responses are length-delimited + */ + function NodeGroupController(rpcImpl, requestDelimited, responseDelimited) { + $protobuf.rpc.Service.call(this, rpcImpl, requestDelimited, responseDelimited); + } + + (NodeGroupController.prototype = Object.create($protobuf.rpc.Service.prototype)).constructor = NodeGroupController; + + /** + * Creates new NodeGroupController service using the specified rpc implementation. 
+ * @function create + * @memberof google.cloud.dataproc.v1.NodeGroupController + * @static + * @param {$protobuf.RPCImpl} rpcImpl RPC implementation + * @param {boolean} [requestDelimited=false] Whether requests are length-delimited + * @param {boolean} [responseDelimited=false] Whether responses are length-delimited + * @returns {NodeGroupController} RPC service. Useful where requests and/or responses are streamed. + */ + NodeGroupController.create = function create(rpcImpl, requestDelimited, responseDelimited) { + return new this(rpcImpl, requestDelimited, responseDelimited); + }; + + /** + * Callback as used by {@link google.cloud.dataproc.v1.NodeGroupController|createNodeGroup}. + * @memberof google.cloud.dataproc.v1.NodeGroupController + * @typedef CreateNodeGroupCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.longrunning.Operation} [response] Operation + */ + + /** + * Calls CreateNodeGroup. + * @function createNodeGroup + * @memberof google.cloud.dataproc.v1.NodeGroupController + * @instance + * @param {google.cloud.dataproc.v1.ICreateNodeGroupRequest} request CreateNodeGroupRequest message or plain object + * @param {google.cloud.dataproc.v1.NodeGroupController.CreateNodeGroupCallback} callback Node-style callback called with the error, if any, and Operation + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(NodeGroupController.prototype.createNodeGroup = function createNodeGroup(request, callback) { + return this.rpcCall(createNodeGroup, $root.google.cloud.dataproc.v1.CreateNodeGroupRequest, $root.google.longrunning.Operation, request, callback); + }, "name", { value: "CreateNodeGroup" }); + + /** + * Calls CreateNodeGroup. 
+ * @function createNodeGroup + * @memberof google.cloud.dataproc.v1.NodeGroupController + * @instance + * @param {google.cloud.dataproc.v1.ICreateNodeGroupRequest} request CreateNodeGroupRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + + /** + * Callback as used by {@link google.cloud.dataproc.v1.NodeGroupController|resizeNodeGroup}. + * @memberof google.cloud.dataproc.v1.NodeGroupController + * @typedef ResizeNodeGroupCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.longrunning.Operation} [response] Operation */ - ListJobsRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - var object = {}; - if (options.defaults) { - object.projectId = ""; - object.pageSize = 0; - object.pageToken = ""; - object.clusterName = ""; - object.jobStateMatcher = options.enums === String ? "ALL" : 0; - object.region = ""; - object.filter = ""; - } - if (message.projectId != null && message.hasOwnProperty("projectId")) - object.projectId = message.projectId; - if (message.pageSize != null && message.hasOwnProperty("pageSize")) - object.pageSize = message.pageSize; - if (message.pageToken != null && message.hasOwnProperty("pageToken")) - object.pageToken = message.pageToken; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - object.clusterName = message.clusterName; - if (message.jobStateMatcher != null && message.hasOwnProperty("jobStateMatcher")) - object.jobStateMatcher = options.enums === String ? $root.google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher[message.jobStateMatcher] === undefined ? 
message.jobStateMatcher : $root.google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher[message.jobStateMatcher] : message.jobStateMatcher; - if (message.region != null && message.hasOwnProperty("region")) - object.region = message.region; - if (message.filter != null && message.hasOwnProperty("filter")) - object.filter = message.filter; - return object; - }; /** - * Converts this ListJobsRequest to JSON. - * @function toJSON - * @memberof google.cloud.dataproc.v1.ListJobsRequest + * Calls ResizeNodeGroup. + * @function resizeNodeGroup + * @memberof google.cloud.dataproc.v1.NodeGroupController * @instance - * @returns {Object.} JSON object + * @param {google.cloud.dataproc.v1.IResizeNodeGroupRequest} request ResizeNodeGroupRequest message or plain object + * @param {google.cloud.dataproc.v1.NodeGroupController.ResizeNodeGroupCallback} callback Node-style callback called with the error, if any, and Operation + * @returns {undefined} + * @variation 1 */ - ListJobsRequest.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + Object.defineProperty(NodeGroupController.prototype.resizeNodeGroup = function resizeNodeGroup(request, callback) { + return this.rpcCall(resizeNodeGroup, $root.google.cloud.dataproc.v1.ResizeNodeGroupRequest, $root.google.longrunning.Operation, request, callback); + }, "name", { value: "ResizeNodeGroup" }); /** - * Gets the default type url for ListJobsRequest - * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.ListJobsRequest - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url + * Calls ResizeNodeGroup. 
+ * @function resizeNodeGroup + * @memberof google.cloud.dataproc.v1.NodeGroupController + * @instance + * @param {google.cloud.dataproc.v1.IResizeNodeGroupRequest} request ResizeNodeGroupRequest message or plain object + * @returns {Promise} Promise + * @variation 2 */ - ListJobsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/google.cloud.dataproc.v1.ListJobsRequest"; - }; /** - * JobStateMatcher enum. - * @name google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher - * @enum {number} - * @property {number} ALL=0 ALL value - * @property {number} ACTIVE=1 ACTIVE value - * @property {number} NON_ACTIVE=2 NON_ACTIVE value + * Callback as used by {@link google.cloud.dataproc.v1.NodeGroupController|getNodeGroup}. + * @memberof google.cloud.dataproc.v1.NodeGroupController + * @typedef GetNodeGroupCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {google.cloud.dataproc.v1.NodeGroup} [response] NodeGroup */ - ListJobsRequest.JobStateMatcher = (function() { - var valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "ALL"] = 0; - values[valuesById[1] = "ACTIVE"] = 1; - values[valuesById[2] = "NON_ACTIVE"] = 2; - return values; - })(); - return ListJobsRequest; + /** + * Calls GetNodeGroup. 
+ * @function getNodeGroup + * @memberof google.cloud.dataproc.v1.NodeGroupController + * @instance + * @param {google.cloud.dataproc.v1.IGetNodeGroupRequest} request GetNodeGroupRequest message or plain object + * @param {google.cloud.dataproc.v1.NodeGroupController.GetNodeGroupCallback} callback Node-style callback called with the error, if any, and NodeGroup + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(NodeGroupController.prototype.getNodeGroup = function getNodeGroup(request, callback) { + return this.rpcCall(getNodeGroup, $root.google.cloud.dataproc.v1.GetNodeGroupRequest, $root.google.cloud.dataproc.v1.NodeGroup, request, callback); + }, "name", { value: "GetNodeGroup" }); + + /** + * Calls GetNodeGroup. + * @function getNodeGroup + * @memberof google.cloud.dataproc.v1.NodeGroupController + * @instance + * @param {google.cloud.dataproc.v1.IGetNodeGroupRequest} request GetNodeGroupRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + + return NodeGroupController; })(); - v1.UpdateJobRequest = (function() { + v1.CreateNodeGroupRequest = (function() { /** - * Properties of an UpdateJobRequest. + * Properties of a CreateNodeGroupRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IUpdateJobRequest - * @property {string|null} [projectId] UpdateJobRequest projectId - * @property {string|null} [region] UpdateJobRequest region - * @property {string|null} [jobId] UpdateJobRequest jobId - * @property {google.cloud.dataproc.v1.IJob|null} [job] UpdateJobRequest job - * @property {google.protobuf.IFieldMask|null} [updateMask] UpdateJobRequest updateMask + * @interface ICreateNodeGroupRequest + * @property {string|null} [parent] CreateNodeGroupRequest parent + * @property {google.cloud.dataproc.v1.INodeGroup|null} [nodeGroup] CreateNodeGroupRequest nodeGroup + * @property {string|null} [nodeGroupId] CreateNodeGroupRequest nodeGroupId + * @property {string|null} [requestId] CreateNodeGroupRequest requestId */ /** - * Constructs a new UpdateJobRequest. + * Constructs a new CreateNodeGroupRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents an UpdateJobRequest. - * @implements IUpdateJobRequest + * @classdesc Represents a CreateNodeGroupRequest. + * @implements ICreateNodeGroupRequest * @constructor - * @param {google.cloud.dataproc.v1.IUpdateJobRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.ICreateNodeGroupRequest=} [properties] Properties to set */ - function UpdateJobRequest(properties) { + function CreateNodeGroupRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -30143,131 +32234,117 @@ } /** - * UpdateJobRequest projectId. - * @member {string} projectId - * @memberof google.cloud.dataproc.v1.UpdateJobRequest - * @instance - */ - UpdateJobRequest.prototype.projectId = ""; - - /** - * UpdateJobRequest region. - * @member {string} region - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * CreateNodeGroupRequest parent. 
+ * @member {string} parent + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @instance */ - UpdateJobRequest.prototype.region = ""; + CreateNodeGroupRequest.prototype.parent = ""; /** - * UpdateJobRequest jobId. - * @member {string} jobId - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * CreateNodeGroupRequest nodeGroup. + * @member {google.cloud.dataproc.v1.INodeGroup|null|undefined} nodeGroup + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @instance */ - UpdateJobRequest.prototype.jobId = ""; + CreateNodeGroupRequest.prototype.nodeGroup = null; /** - * UpdateJobRequest job. - * @member {google.cloud.dataproc.v1.IJob|null|undefined} job - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * CreateNodeGroupRequest nodeGroupId. + * @member {string} nodeGroupId + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @instance */ - UpdateJobRequest.prototype.job = null; + CreateNodeGroupRequest.prototype.nodeGroupId = ""; /** - * UpdateJobRequest updateMask. - * @member {google.protobuf.IFieldMask|null|undefined} updateMask - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * CreateNodeGroupRequest requestId. + * @member {string} requestId + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @instance */ - UpdateJobRequest.prototype.updateMask = null; + CreateNodeGroupRequest.prototype.requestId = ""; /** - * Creates a new UpdateJobRequest instance using the specified properties. + * Creates a new CreateNodeGroupRequest instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.IUpdateJobRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.UpdateJobRequest} UpdateJobRequest instance + * @param {google.cloud.dataproc.v1.ICreateNodeGroupRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.CreateNodeGroupRequest} CreateNodeGroupRequest instance */ - UpdateJobRequest.create = function create(properties) { - return new UpdateJobRequest(properties); + CreateNodeGroupRequest.create = function create(properties) { + return new CreateNodeGroupRequest(properties); }; /** - * Encodes the specified UpdateJobRequest message. Does not implicitly {@link google.cloud.dataproc.v1.UpdateJobRequest.verify|verify} messages. + * Encodes the specified CreateNodeGroupRequest message. Does not implicitly {@link google.cloud.dataproc.v1.CreateNodeGroupRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.IUpdateJobRequest} message UpdateJobRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.ICreateNodeGroupRequest} message CreateNodeGroupRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateJobRequest.encode = function encode(message, writer) { + CreateNodeGroupRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.region); - if (message.jobId != null && Object.hasOwnProperty.call(message, "jobId")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.jobId); - if (message.job != null && Object.hasOwnProperty.call(message, "job")) - $root.google.cloud.dataproc.v1.Job.encode(message.job, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.updateMask != null && Object.hasOwnProperty.call(message, "updateMask")) - $root.google.protobuf.FieldMask.encode(message.updateMask, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.parent != null && Object.hasOwnProperty.call(message, "parent")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.parent); + if (message.nodeGroup != null && Object.hasOwnProperty.call(message, "nodeGroup")) + $root.google.cloud.dataproc.v1.NodeGroup.encode(message.nodeGroup, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) + writer.uint32(/* id 3, wireType 2 
=*/26).string(message.requestId); + if (message.nodeGroupId != null && Object.hasOwnProperty.call(message, "nodeGroupId")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.nodeGroupId); return writer; }; /** - * Encodes the specified UpdateJobRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.UpdateJobRequest.verify|verify} messages. + * Encodes the specified CreateNodeGroupRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.CreateNodeGroupRequest.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.IUpdateJobRequest} message UpdateJobRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.ICreateNodeGroupRequest} message CreateNodeGroupRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateJobRequest.encodeDelimited = function encodeDelimited(message, writer) { + CreateNodeGroupRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an UpdateJobRequest message from the specified reader or buffer. + * Decodes a CreateNodeGroupRequest message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.UpdateJobRequest} UpdateJobRequest + * @returns {google.cloud.dataproc.v1.CreateNodeGroupRequest} CreateNodeGroupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateJobRequest.decode = function decode(reader, length) { + CreateNodeGroupRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.UpdateJobRequest(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.CreateNodeGroupRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.projectId = reader.string(); + message.parent = reader.string(); break; } case 2: { - message.region = reader.string(); - break; - } - case 3: { - message.jobId = reader.string(); + message.nodeGroup = $root.google.cloud.dataproc.v1.NodeGroup.decode(reader, reader.uint32()); break; } case 4: { - message.job = $root.google.cloud.dataproc.v1.Job.decode(reader, reader.uint32()); + message.nodeGroupId = reader.string(); break; } - case 5: { - message.updateMask = $root.google.protobuf.FieldMask.decode(reader, reader.uint32()); + case 3: { + message.requestId = reader.string(); break; } default: @@ -30279,167 +32356,155 @@ }; /** - * Decodes an UpdateJobRequest message from the specified reader or buffer, length delimited. + * Decodes a CreateNodeGroupRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.UpdateJobRequest} UpdateJobRequest + * @returns {google.cloud.dataproc.v1.CreateNodeGroupRequest} CreateNodeGroupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateJobRequest.decodeDelimited = function decodeDelimited(reader) { + CreateNodeGroupRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an UpdateJobRequest message. + * Verifies a CreateNodeGroupRequest message. * @function verify - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - UpdateJobRequest.verify = function verify(message) { + CreateNodeGroupRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.projectId != null && message.hasOwnProperty("projectId")) - if (!$util.isString(message.projectId)) - return "projectId: string expected"; - if (message.region != null && message.hasOwnProperty("region")) - if (!$util.isString(message.region)) - return "region: string expected"; - if (message.jobId != null && message.hasOwnProperty("jobId")) - if (!$util.isString(message.jobId)) - return "jobId: string expected"; - if (message.job != null && message.hasOwnProperty("job")) { - var error = $root.google.cloud.dataproc.v1.Job.verify(message.job); - if (error) - return "job." 
+ error; - } - if (message.updateMask != null && message.hasOwnProperty("updateMask")) { - var error = $root.google.protobuf.FieldMask.verify(message.updateMask); + if (message.parent != null && message.hasOwnProperty("parent")) + if (!$util.isString(message.parent)) + return "parent: string expected"; + if (message.nodeGroup != null && message.hasOwnProperty("nodeGroup")) { + var error = $root.google.cloud.dataproc.v1.NodeGroup.verify(message.nodeGroup); if (error) - return "updateMask." + error; + return "nodeGroup." + error; } + if (message.nodeGroupId != null && message.hasOwnProperty("nodeGroupId")) + if (!$util.isString(message.nodeGroupId)) + return "nodeGroupId: string expected"; + if (message.requestId != null && message.hasOwnProperty("requestId")) + if (!$util.isString(message.requestId)) + return "requestId: string expected"; return null; }; /** - * Creates an UpdateJobRequest message from a plain object. Also converts values to their respective internal types. + * Creates a CreateNodeGroupRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.UpdateJobRequest} UpdateJobRequest + * @returns {google.cloud.dataproc.v1.CreateNodeGroupRequest} CreateNodeGroupRequest */ - UpdateJobRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.UpdateJobRequest) + CreateNodeGroupRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.CreateNodeGroupRequest) return object; - var message = new $root.google.cloud.dataproc.v1.UpdateJobRequest(); - if (object.projectId != null) - message.projectId = String(object.projectId); - if (object.region != null) - message.region = String(object.region); - if (object.jobId != null) - message.jobId = String(object.jobId); - if (object.job != null) { - if (typeof object.job !== "object") - throw TypeError(".google.cloud.dataproc.v1.UpdateJobRequest.job: object expected"); - message.job = $root.google.cloud.dataproc.v1.Job.fromObject(object.job); - } - if (object.updateMask != null) { - if (typeof object.updateMask !== "object") - throw TypeError(".google.cloud.dataproc.v1.UpdateJobRequest.updateMask: object expected"); - message.updateMask = $root.google.protobuf.FieldMask.fromObject(object.updateMask); + var message = new $root.google.cloud.dataproc.v1.CreateNodeGroupRequest(); + if (object.parent != null) + message.parent = String(object.parent); + if (object.nodeGroup != null) { + if (typeof object.nodeGroup !== "object") + throw TypeError(".google.cloud.dataproc.v1.CreateNodeGroupRequest.nodeGroup: object expected"); + message.nodeGroup = $root.google.cloud.dataproc.v1.NodeGroup.fromObject(object.nodeGroup); } + if (object.nodeGroupId != null) + message.nodeGroupId = String(object.nodeGroupId); + if (object.requestId != null) + message.requestId = 
String(object.requestId); return message; }; /** - * Creates a plain object from an UpdateJobRequest message. Also converts values to other types if specified. + * Creates a plain object from a CreateNodeGroupRequest message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.UpdateJobRequest} message UpdateJobRequest + * @param {google.cloud.dataproc.v1.CreateNodeGroupRequest} message CreateNodeGroupRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - UpdateJobRequest.toObject = function toObject(message, options) { + CreateNodeGroupRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { - object.projectId = ""; - object.region = ""; - object.jobId = ""; - object.job = null; - object.updateMask = null; + object.parent = ""; + object.nodeGroup = null; + object.requestId = ""; + object.nodeGroupId = ""; } - if (message.projectId != null && message.hasOwnProperty("projectId")) - object.projectId = message.projectId; - if (message.region != null && message.hasOwnProperty("region")) - object.region = message.region; - if (message.jobId != null && message.hasOwnProperty("jobId")) - object.jobId = message.jobId; - if (message.job != null && message.hasOwnProperty("job")) - object.job = $root.google.cloud.dataproc.v1.Job.toObject(message.job, options); - if (message.updateMask != null && message.hasOwnProperty("updateMask")) - object.updateMask = $root.google.protobuf.FieldMask.toObject(message.updateMask, options); + if (message.parent != null && message.hasOwnProperty("parent")) + object.parent = message.parent; + if (message.nodeGroup != null && message.hasOwnProperty("nodeGroup")) + object.nodeGroup = 
$root.google.cloud.dataproc.v1.NodeGroup.toObject(message.nodeGroup, options); + if (message.requestId != null && message.hasOwnProperty("requestId")) + object.requestId = message.requestId; + if (message.nodeGroupId != null && message.hasOwnProperty("nodeGroupId")) + object.nodeGroupId = message.nodeGroupId; return object; }; /** - * Converts this UpdateJobRequest to JSON. + * Converts this CreateNodeGroupRequest to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @instance * @returns {Object.} JSON object */ - UpdateJobRequest.prototype.toJSON = function toJSON() { + CreateNodeGroupRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for UpdateJobRequest + * Gets the default type url for CreateNodeGroupRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.UpdateJobRequest + * @memberof google.cloud.dataproc.v1.CreateNodeGroupRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - UpdateJobRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CreateNodeGroupRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.UpdateJobRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.CreateNodeGroupRequest"; }; - return UpdateJobRequest; + return CreateNodeGroupRequest; })(); - v1.ListJobsResponse = (function() { + v1.ResizeNodeGroupRequest = (function() { /** - * Properties of a ListJobsResponse. + * Properties of a ResizeNodeGroupRequest. 
* @memberof google.cloud.dataproc.v1 - * @interface IListJobsResponse - * @property {Array.|null} [jobs] ListJobsResponse jobs - * @property {string|null} [nextPageToken] ListJobsResponse nextPageToken + * @interface IResizeNodeGroupRequest + * @property {string|null} [name] ResizeNodeGroupRequest name + * @property {number|null} [size] ResizeNodeGroupRequest size + * @property {string|null} [requestId] ResizeNodeGroupRequest requestId + * @property {google.protobuf.IDuration|null} [gracefulDecommissionTimeout] ResizeNodeGroupRequest gracefulDecommissionTimeout */ /** - * Constructs a new ListJobsResponse. + * Constructs a new ResizeNodeGroupRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a ListJobsResponse. - * @implements IListJobsResponse + * @classdesc Represents a ResizeNodeGroupRequest. + * @implements IResizeNodeGroupRequest * @constructor - * @param {google.cloud.dataproc.v1.IListJobsResponse=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IResizeNodeGroupRequest=} [properties] Properties to set */ - function ListJobsResponse(properties) { - this.jobs = []; + function ResizeNodeGroupRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -30447,92 +32512,117 @@ } /** - * ListJobsResponse jobs. - * @member {Array.} jobs - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * ResizeNodeGroupRequest name. + * @member {string} name + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @instance */ - ListJobsResponse.prototype.jobs = $util.emptyArray; + ResizeNodeGroupRequest.prototype.name = ""; /** - * ListJobsResponse nextPageToken. - * @member {string} nextPageToken - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * ResizeNodeGroupRequest size. 
+ * @member {number} size + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @instance */ - ListJobsResponse.prototype.nextPageToken = ""; + ResizeNodeGroupRequest.prototype.size = 0; /** - * Creates a new ListJobsResponse instance using the specified properties. + * ResizeNodeGroupRequest requestId. + * @member {string} requestId + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest + * @instance + */ + ResizeNodeGroupRequest.prototype.requestId = ""; + + /** + * ResizeNodeGroupRequest gracefulDecommissionTimeout. + * @member {google.protobuf.IDuration|null|undefined} gracefulDecommissionTimeout + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest + * @instance + */ + ResizeNodeGroupRequest.prototype.gracefulDecommissionTimeout = null; + + /** + * Creates a new ResizeNodeGroupRequest instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.IListJobsResponse=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.ListJobsResponse} ListJobsResponse instance + * @param {google.cloud.dataproc.v1.IResizeNodeGroupRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.ResizeNodeGroupRequest} ResizeNodeGroupRequest instance */ - ListJobsResponse.create = function create(properties) { - return new ListJobsResponse(properties); + ResizeNodeGroupRequest.create = function create(properties) { + return new ResizeNodeGroupRequest(properties); }; /** - * Encodes the specified ListJobsResponse message. Does not implicitly {@link google.cloud.dataproc.v1.ListJobsResponse.verify|verify} messages. + * Encodes the specified ResizeNodeGroupRequest message. Does not implicitly {@link google.cloud.dataproc.v1.ResizeNodeGroupRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.IListJobsResponse} message ListJobsResponse message or plain object to encode + * @param {google.cloud.dataproc.v1.IResizeNodeGroupRequest} message ResizeNodeGroupRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ListJobsResponse.encode = function encode(message, writer) { + ResizeNodeGroupRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.jobs != null && message.jobs.length) - for (var i = 0; i < message.jobs.length; ++i) - $root.google.cloud.dataproc.v1.Job.encode(message.jobs[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.nextPageToken != null && Object.hasOwnProperty.call(message, "nextPageToken")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.nextPageToken); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.size != null && Object.hasOwnProperty.call(message, "size")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.size); + if (message.requestId != null && Object.hasOwnProperty.call(message, "requestId")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.requestId); + if (message.gracefulDecommissionTimeout != null && Object.hasOwnProperty.call(message, "gracefulDecommissionTimeout")) + $root.google.protobuf.Duration.encode(message.gracefulDecommissionTimeout, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified ListJobsResponse message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ListJobsResponse.verify|verify} messages. + * Encodes the specified ResizeNodeGroupRequest message, length delimited. 
Does not implicitly {@link google.cloud.dataproc.v1.ResizeNodeGroupRequest.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.IListJobsResponse} message ListJobsResponse message or plain object to encode + * @param {google.cloud.dataproc.v1.IResizeNodeGroupRequest} message ResizeNodeGroupRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ListJobsResponse.encodeDelimited = function encodeDelimited(message, writer) { + ResizeNodeGroupRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ListJobsResponse message from the specified reader or buffer. + * Decodes a ResizeNodeGroupRequest message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.ListJobsResponse} ListJobsResponse + * @returns {google.cloud.dataproc.v1.ResizeNodeGroupRequest} ResizeNodeGroupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListJobsResponse.decode = function decode(reader, length) { + ResizeNodeGroupRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ListJobsResponse(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ResizeNodeGroupRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.jobs && message.jobs.length)) - message.jobs = []; - message.jobs.push($root.google.cloud.dataproc.v1.Job.decode(reader, reader.uint32())); + message.name = reader.string(); break; } case 2: { - message.nextPageToken = reader.string(); + message.size = reader.int32(); + break; + } + case 3: { + message.requestId = reader.string(); + break; + } + case 4: { + message.gracefulDecommissionTimeout = $root.google.protobuf.Duration.decode(reader, reader.uint32()); break; } default: @@ -30544,150 +32634,152 @@ }; /** - * Decodes a ListJobsResponse message from the specified reader or buffer, length delimited. + * Decodes a ResizeNodeGroupRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.ListJobsResponse} ListJobsResponse + * @returns {google.cloud.dataproc.v1.ResizeNodeGroupRequest} ResizeNodeGroupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ListJobsResponse.decodeDelimited = function decodeDelimited(reader) { + ResizeNodeGroupRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ListJobsResponse message. + * Verifies a ResizeNodeGroupRequest message. 
* @function verify - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ListJobsResponse.verify = function verify(message) { + ResizeNodeGroupRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.jobs != null && message.hasOwnProperty("jobs")) { - if (!Array.isArray(message.jobs)) - return "jobs: array expected"; - for (var i = 0; i < message.jobs.length; ++i) { - var error = $root.google.cloud.dataproc.v1.Job.verify(message.jobs[i]); - if (error) - return "jobs." + error; - } + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.size != null && message.hasOwnProperty("size")) + if (!$util.isInteger(message.size)) + return "size: integer expected"; + if (message.requestId != null && message.hasOwnProperty("requestId")) + if (!$util.isString(message.requestId)) + return "requestId: string expected"; + if (message.gracefulDecommissionTimeout != null && message.hasOwnProperty("gracefulDecommissionTimeout")) { + var error = $root.google.protobuf.Duration.verify(message.gracefulDecommissionTimeout); + if (error) + return "gracefulDecommissionTimeout." + error; } - if (message.nextPageToken != null && message.hasOwnProperty("nextPageToken")) - if (!$util.isString(message.nextPageToken)) - return "nextPageToken: string expected"; return null; }; /** - * Creates a ListJobsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ResizeNodeGroupRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.ListJobsResponse} ListJobsResponse + * @returns {google.cloud.dataproc.v1.ResizeNodeGroupRequest} ResizeNodeGroupRequest */ - ListJobsResponse.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.ListJobsResponse) + ResizeNodeGroupRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.ResizeNodeGroupRequest) return object; - var message = new $root.google.cloud.dataproc.v1.ListJobsResponse(); - if (object.jobs) { - if (!Array.isArray(object.jobs)) - throw TypeError(".google.cloud.dataproc.v1.ListJobsResponse.jobs: array expected"); - message.jobs = []; - for (var i = 0; i < object.jobs.length; ++i) { - if (typeof object.jobs[i] !== "object") - throw TypeError(".google.cloud.dataproc.v1.ListJobsResponse.jobs: object expected"); - message.jobs[i] = $root.google.cloud.dataproc.v1.Job.fromObject(object.jobs[i]); - } + var message = new $root.google.cloud.dataproc.v1.ResizeNodeGroupRequest(); + if (object.name != null) + message.name = String(object.name); + if (object.size != null) + message.size = object.size | 0; + if (object.requestId != null) + message.requestId = String(object.requestId); + if (object.gracefulDecommissionTimeout != null) { + if (typeof object.gracefulDecommissionTimeout !== "object") + throw TypeError(".google.cloud.dataproc.v1.ResizeNodeGroupRequest.gracefulDecommissionTimeout: object expected"); + message.gracefulDecommissionTimeout = $root.google.protobuf.Duration.fromObject(object.gracefulDecommissionTimeout); } - if (object.nextPageToken != null) - message.nextPageToken = String(object.nextPageToken); return message; }; /** - * Creates a plain object from a ListJobsResponse message. 
Also converts values to other types if specified. + * Creates a plain object from a ResizeNodeGroupRequest message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.ListJobsResponse} message ListJobsResponse + * @param {google.cloud.dataproc.v1.ResizeNodeGroupRequest} message ResizeNodeGroupRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ListJobsResponse.toObject = function toObject(message, options) { + ResizeNodeGroupRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) - object.jobs = []; - if (options.defaults) - object.nextPageToken = ""; - if (message.jobs && message.jobs.length) { - object.jobs = []; - for (var j = 0; j < message.jobs.length; ++j) - object.jobs[j] = $root.google.cloud.dataproc.v1.Job.toObject(message.jobs[j], options); + if (options.defaults) { + object.name = ""; + object.size = 0; + object.requestId = ""; + object.gracefulDecommissionTimeout = null; } - if (message.nextPageToken != null && message.hasOwnProperty("nextPageToken")) - object.nextPageToken = message.nextPageToken; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.size != null && message.hasOwnProperty("size")) + object.size = message.size; + if (message.requestId != null && message.hasOwnProperty("requestId")) + object.requestId = message.requestId; + if (message.gracefulDecommissionTimeout != null && message.hasOwnProperty("gracefulDecommissionTimeout")) + object.gracefulDecommissionTimeout = $root.google.protobuf.Duration.toObject(message.gracefulDecommissionTimeout, options); return object; }; /** - * Converts this ListJobsResponse to JSON. 
+ * Converts this ResizeNodeGroupRequest to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @instance * @returns {Object.} JSON object */ - ListJobsResponse.prototype.toJSON = function toJSON() { + ResizeNodeGroupRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ListJobsResponse + * Gets the default type url for ResizeNodeGroupRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.ListJobsResponse + * @memberof google.cloud.dataproc.v1.ResizeNodeGroupRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ListJobsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ResizeNodeGroupRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.ListJobsResponse"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.ResizeNodeGroupRequest"; }; - return ListJobsResponse; + return ResizeNodeGroupRequest; })(); - v1.CancelJobRequest = (function() { + v1.GetNodeGroupRequest = (function() { /** - * Properties of a CancelJobRequest. + * Properties of a GetNodeGroupRequest. * @memberof google.cloud.dataproc.v1 - * @interface ICancelJobRequest - * @property {string|null} [projectId] CancelJobRequest projectId - * @property {string|null} [region] CancelJobRequest region - * @property {string|null} [jobId] CancelJobRequest jobId + * @interface IGetNodeGroupRequest + * @property {string|null} [name] GetNodeGroupRequest name */ /** - * Constructs a new CancelJobRequest. + * Constructs a new GetNodeGroupRequest. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a CancelJobRequest. 
- * @implements ICancelJobRequest + * @classdesc Represents a GetNodeGroupRequest. + * @implements IGetNodeGroupRequest * @constructor - * @param {google.cloud.dataproc.v1.ICancelJobRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IGetNodeGroupRequest=} [properties] Properties to set */ - function CancelJobRequest(properties) { + function GetNodeGroupRequest(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -30695,103 +32787,75 @@ } /** - * CancelJobRequest projectId. - * @member {string} projectId - * @memberof google.cloud.dataproc.v1.CancelJobRequest - * @instance - */ - CancelJobRequest.prototype.projectId = ""; - - /** - * CancelJobRequest region. - * @member {string} region - * @memberof google.cloud.dataproc.v1.CancelJobRequest - * @instance - */ - CancelJobRequest.prototype.region = ""; - - /** - * CancelJobRequest jobId. - * @member {string} jobId - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * GetNodeGroupRequest name. + * @member {string} name + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @instance */ - CancelJobRequest.prototype.jobId = ""; + GetNodeGroupRequest.prototype.name = ""; /** - * Creates a new CancelJobRequest instance using the specified properties. + * Creates a new GetNodeGroupRequest instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.ICancelJobRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.CancelJobRequest} CancelJobRequest instance + * @param {google.cloud.dataproc.v1.IGetNodeGroupRequest=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.GetNodeGroupRequest} GetNodeGroupRequest instance */ - CancelJobRequest.create = function create(properties) { - return new CancelJobRequest(properties); + GetNodeGroupRequest.create = function create(properties) { + return new GetNodeGroupRequest(properties); }; /** - * Encodes the specified CancelJobRequest message. Does not implicitly {@link google.cloud.dataproc.v1.CancelJobRequest.verify|verify} messages. + * Encodes the specified GetNodeGroupRequest message. Does not implicitly {@link google.cloud.dataproc.v1.GetNodeGroupRequest.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.ICancelJobRequest} message CancelJobRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IGetNodeGroupRequest} message GetNodeGroupRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CancelJobRequest.encode = function encode(message, writer) { + GetNodeGroupRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.jobId != null && Object.hasOwnProperty.call(message, "jobId")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.jobId); - if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); return writer; }; /** - * Encodes the specified CancelJobRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.CancelJobRequest.verify|verify} messages. + * Encodes the specified GetNodeGroupRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.GetNodeGroupRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.ICancelJobRequest} message CancelJobRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IGetNodeGroupRequest} message GetNodeGroupRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CancelJobRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetNodeGroupRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CancelJobRequest message from the specified reader or buffer. + * Decodes a GetNodeGroupRequest message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.CancelJobRequest} CancelJobRequest + * @returns {google.cloud.dataproc.v1.GetNodeGroupRequest} GetNodeGroupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CancelJobRequest.decode = function decode(reader, length) { + GetNodeGroupRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.CancelJobRequest(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.GetNodeGroupRequest(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.projectId = reader.string(); - break; - } - case 3: { - message.region = reader.string(); - break; - } - case 2: { - message.jobId = reader.string(); + message.name = reader.string(); break; } default: @@ -30803,141 +32867,131 @@ }; /** - * Decodes a CancelJobRequest message from the specified reader or buffer, length delimited. + * Decodes a GetNodeGroupRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.CancelJobRequest} CancelJobRequest + * @returns {google.cloud.dataproc.v1.GetNodeGroupRequest} GetNodeGroupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CancelJobRequest.decodeDelimited = function decodeDelimited(reader) { + GetNodeGroupRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CancelJobRequest message. + * Verifies a GetNodeGroupRequest message. 
* @function verify - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - CancelJobRequest.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.projectId != null && message.hasOwnProperty("projectId")) - if (!$util.isString(message.projectId)) - return "projectId: string expected"; - if (message.region != null && message.hasOwnProperty("region")) - if (!$util.isString(message.region)) - return "region: string expected"; - if (message.jobId != null && message.hasOwnProperty("jobId")) - if (!$util.isString(message.jobId)) - return "jobId: string expected"; + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GetNodeGroupRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; return null; }; /** - * Creates a CancelJobRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetNodeGroupRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.CancelJobRequest} CancelJobRequest + * @returns {google.cloud.dataproc.v1.GetNodeGroupRequest} GetNodeGroupRequest */ - CancelJobRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.CancelJobRequest) + GetNodeGroupRequest.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.GetNodeGroupRequest) return object; - var message = new $root.google.cloud.dataproc.v1.CancelJobRequest(); - if (object.projectId != null) - message.projectId = String(object.projectId); - if (object.region != null) - message.region = String(object.region); - if (object.jobId != null) - message.jobId = String(object.jobId); + var message = new $root.google.cloud.dataproc.v1.GetNodeGroupRequest(); + if (object.name != null) + message.name = String(object.name); return message; }; /** - * Creates a plain object from a CancelJobRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetNodeGroupRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @static - * @param {google.cloud.dataproc.v1.CancelJobRequest} message CancelJobRequest + * @param {google.cloud.dataproc.v1.GetNodeGroupRequest} message GetNodeGroupRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CancelJobRequest.toObject = function toObject(message, options) { + GetNodeGroupRequest.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.defaults) { - object.projectId = ""; - object.jobId = ""; - object.region = ""; - } - if (message.projectId != null && message.hasOwnProperty("projectId")) - object.projectId = message.projectId; - if (message.jobId != null && message.hasOwnProperty("jobId")) - object.jobId = message.jobId; - if (message.region != null && message.hasOwnProperty("region")) - object.region = message.region; + if (options.defaults) + object.name = ""; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; return object; }; /** - * Converts this CancelJobRequest to JSON. + * Converts this GetNodeGroupRequest to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @instance * @returns {Object.} JSON object */ - CancelJobRequest.prototype.toJSON = function toJSON() { + GetNodeGroupRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for CancelJobRequest + * Gets the default type url for GetNodeGroupRequest * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.CancelJobRequest + * @memberof google.cloud.dataproc.v1.GetNodeGroupRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - CancelJobRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetNodeGroupRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.CancelJobRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.GetNodeGroupRequest"; }; - return CancelJobRequest; + return GetNodeGroupRequest; })(); - v1.DeleteJobRequest = (function() { + v1.BatchOperationMetadata = (function() { /** - * Properties of a DeleteJobRequest. + * Properties of a BatchOperationMetadata. 
* @memberof google.cloud.dataproc.v1 - * @interface IDeleteJobRequest - * @property {string|null} [projectId] DeleteJobRequest projectId - * @property {string|null} [region] DeleteJobRequest region - * @property {string|null} [jobId] DeleteJobRequest jobId + * @interface IBatchOperationMetadata + * @property {string|null} [batch] BatchOperationMetadata batch + * @property {string|null} [batchUuid] BatchOperationMetadata batchUuid + * @property {google.protobuf.ITimestamp|null} [createTime] BatchOperationMetadata createTime + * @property {google.protobuf.ITimestamp|null} [doneTime] BatchOperationMetadata doneTime + * @property {google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType|null} [operationType] BatchOperationMetadata operationType + * @property {string|null} [description] BatchOperationMetadata description + * @property {Object.|null} [labels] BatchOperationMetadata labels + * @property {Array.|null} [warnings] BatchOperationMetadata warnings */ /** - * Constructs a new DeleteJobRequest. + * Constructs a new BatchOperationMetadata. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a DeleteJobRequest. - * @implements IDeleteJobRequest + * @classdesc Represents a BatchOperationMetadata. + * @implements IBatchOperationMetadata * @constructor - * @param {google.cloud.dataproc.v1.IDeleteJobRequest=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IBatchOperationMetadata=} [properties] Properties to set */ - function DeleteJobRequest(properties) { + function BatchOperationMetadata(properties) { + this.labels = {}; + this.warnings = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -30945,103 +32999,196 @@ } /** - * DeleteJobRequest projectId. - * @member {string} projectId - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * BatchOperationMetadata batch. 
+ * @member {string} batch + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @instance */ - DeleteJobRequest.prototype.projectId = ""; + BatchOperationMetadata.prototype.batch = ""; /** - * DeleteJobRequest region. - * @member {string} region - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * BatchOperationMetadata batchUuid. + * @member {string} batchUuid + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @instance */ - DeleteJobRequest.prototype.region = ""; + BatchOperationMetadata.prototype.batchUuid = ""; /** - * DeleteJobRequest jobId. - * @member {string} jobId - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * BatchOperationMetadata createTime. + * @member {google.protobuf.ITimestamp|null|undefined} createTime + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @instance */ - DeleteJobRequest.prototype.jobId = ""; + BatchOperationMetadata.prototype.createTime = null; /** - * Creates a new DeleteJobRequest instance using the specified properties. + * BatchOperationMetadata doneTime. + * @member {google.protobuf.ITimestamp|null|undefined} doneTime + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @instance + */ + BatchOperationMetadata.prototype.doneTime = null; + + /** + * BatchOperationMetadata operationType. + * @member {google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType} operationType + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @instance + */ + BatchOperationMetadata.prototype.operationType = 0; + + /** + * BatchOperationMetadata description. + * @member {string} description + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @instance + */ + BatchOperationMetadata.prototype.description = ""; + + /** + * BatchOperationMetadata labels. 
+ * @member {Object.} labels + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @instance + */ + BatchOperationMetadata.prototype.labels = $util.emptyObject; + + /** + * BatchOperationMetadata warnings. + * @member {Array.} warnings + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @instance + */ + BatchOperationMetadata.prototype.warnings = $util.emptyArray; + + /** + * Creates a new BatchOperationMetadata instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @static - * @param {google.cloud.dataproc.v1.IDeleteJobRequest=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.DeleteJobRequest} DeleteJobRequest instance + * @param {google.cloud.dataproc.v1.IBatchOperationMetadata=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.BatchOperationMetadata} BatchOperationMetadata instance */ - DeleteJobRequest.create = function create(properties) { - return new DeleteJobRequest(properties); + BatchOperationMetadata.create = function create(properties) { + return new BatchOperationMetadata(properties); }; /** - * Encodes the specified DeleteJobRequest message. Does not implicitly {@link google.cloud.dataproc.v1.DeleteJobRequest.verify|verify} messages. + * Encodes the specified BatchOperationMetadata message. Does not implicitly {@link google.cloud.dataproc.v1.BatchOperationMetadata.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @static - * @param {google.cloud.dataproc.v1.IDeleteJobRequest} message DeleteJobRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IBatchOperationMetadata} message BatchOperationMetadata message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteJobRequest.encode = function encode(message, writer) { + BatchOperationMetadata.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.projectId != null && Object.hasOwnProperty.call(message, "projectId")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.projectId); - if (message.jobId != null && Object.hasOwnProperty.call(message, "jobId")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.jobId); - if (message.region != null && Object.hasOwnProperty.call(message, "region")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.region); + if (message.batch != null && Object.hasOwnProperty.call(message, "batch")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.batch); + if (message.batchUuid != null && Object.hasOwnProperty.call(message, "batchUuid")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.batchUuid); + if (message.createTime != null && Object.hasOwnProperty.call(message, "createTime")) + $root.google.protobuf.Timestamp.encode(message.createTime, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.doneTime != null && Object.hasOwnProperty.call(message, "doneTime")) + $root.google.protobuf.Timestamp.encode(message.doneTime, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.operationType != null && Object.hasOwnProperty.call(message, "operationType")) + writer.uint32(/* id 6, wireType 0 =*/48).int32(message.operationType); + if (message.description 
!= null && Object.hasOwnProperty.call(message, "description")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.description); + if (message.labels != null && Object.hasOwnProperty.call(message, "labels")) + for (var keys = Object.keys(message.labels), i = 0; i < keys.length; ++i) + writer.uint32(/* id 8, wireType 2 =*/66).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.labels[keys[i]]).ldelim(); + if (message.warnings != null && message.warnings.length) + for (var i = 0; i < message.warnings.length; ++i) + writer.uint32(/* id 9, wireType 2 =*/74).string(message.warnings[i]); return writer; }; /** - * Encodes the specified DeleteJobRequest message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.DeleteJobRequest.verify|verify} messages. + * Encodes the specified BatchOperationMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.BatchOperationMetadata.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @static - * @param {google.cloud.dataproc.v1.IDeleteJobRequest} message DeleteJobRequest message or plain object to encode + * @param {google.cloud.dataproc.v1.IBatchOperationMetadata} message BatchOperationMetadata message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteJobRequest.encodeDelimited = function encodeDelimited(message, writer) { + BatchOperationMetadata.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteJobRequest message from the specified reader or buffer. + * Decodes a BatchOperationMetadata message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.DeleteJobRequest} DeleteJobRequest + * @returns {google.cloud.dataproc.v1.BatchOperationMetadata} BatchOperationMetadata * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteJobRequest.decode = function decode(reader, length) { + BatchOperationMetadata.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.DeleteJobRequest(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.BatchOperationMetadata(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.projectId = reader.string(); + message.batch = reader.string(); + break; + } + case 2: { + message.batchUuid = reader.string(); break; } case 3: { - message.region = reader.string(); + message.createTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); break; } - case 2: { - message.jobId = reader.string(); + case 4: { + message.doneTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + break; + } + case 6: { + message.operationType = reader.int32(); + break; + } + case 7: { + message.description = reader.string(); + break; + } + case 8: { + if (message.labels === $util.emptyObject) + message.labels = {}; + var end2 = reader.uint32() + reader.pos; + key = ""; + value = ""; + while (reader.pos < end2) { + var tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = 
reader.string(); + break; + case 2: + value = reader.string(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.labels[key] = value; + break; + } + case 9: { + if (!(message.warnings && message.warnings.length)) + message.warnings = []; + message.warnings.push(reader.string()); break; } default: @@ -31053,345 +33200,371 @@ }; /** - * Decodes a DeleteJobRequest message from the specified reader or buffer, length delimited. + * Decodes a BatchOperationMetadata message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.DeleteJobRequest} DeleteJobRequest + * @returns {google.cloud.dataproc.v1.BatchOperationMetadata} BatchOperationMetadata * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteJobRequest.decodeDelimited = function decodeDelimited(reader) { + BatchOperationMetadata.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteJobRequest message. + * Verifies a BatchOperationMetadata message. 
* @function verify - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteJobRequest.verify = function verify(message) { + BatchOperationMetadata.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.projectId != null && message.hasOwnProperty("projectId")) - if (!$util.isString(message.projectId)) - return "projectId: string expected"; - if (message.region != null && message.hasOwnProperty("region")) - if (!$util.isString(message.region)) - return "region: string expected"; - if (message.jobId != null && message.hasOwnProperty("jobId")) - if (!$util.isString(message.jobId)) - return "jobId: string expected"; + if (message.batch != null && message.hasOwnProperty("batch")) + if (!$util.isString(message.batch)) + return "batch: string expected"; + if (message.batchUuid != null && message.hasOwnProperty("batchUuid")) + if (!$util.isString(message.batchUuid)) + return "batchUuid: string expected"; + if (message.createTime != null && message.hasOwnProperty("createTime")) { + var error = $root.google.protobuf.Timestamp.verify(message.createTime); + if (error) + return "createTime." + error; + } + if (message.doneTime != null && message.hasOwnProperty("doneTime")) { + var error = $root.google.protobuf.Timestamp.verify(message.doneTime); + if (error) + return "doneTime." 
+ error; + } + if (message.operationType != null && message.hasOwnProperty("operationType")) + switch (message.operationType) { + default: + return "operationType: enum value expected"; + case 0: + case 1: + break; + } + if (message.description != null && message.hasOwnProperty("description")) + if (!$util.isString(message.description)) + return "description: string expected"; + if (message.labels != null && message.hasOwnProperty("labels")) { + if (!$util.isObject(message.labels)) + return "labels: object expected"; + var key = Object.keys(message.labels); + for (var i = 0; i < key.length; ++i) + if (!$util.isString(message.labels[key[i]])) + return "labels: string{k:string} expected"; + } + if (message.warnings != null && message.hasOwnProperty("warnings")) { + if (!Array.isArray(message.warnings)) + return "warnings: array expected"; + for (var i = 0; i < message.warnings.length; ++i) + if (!$util.isString(message.warnings[i])) + return "warnings: string[] expected"; + } return null; }; /** - * Creates a DeleteJobRequest message from a plain object. Also converts values to their respective internal types. + * Creates a BatchOperationMetadata message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.DeleteJobRequest} DeleteJobRequest + * @returns {google.cloud.dataproc.v1.BatchOperationMetadata} BatchOperationMetadata */ - DeleteJobRequest.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.DeleteJobRequest) + BatchOperationMetadata.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.BatchOperationMetadata) return object; - var message = new $root.google.cloud.dataproc.v1.DeleteJobRequest(); - if (object.projectId != null) - message.projectId = String(object.projectId); - if (object.region != null) - message.region = String(object.region); - if (object.jobId != null) - message.jobId = String(object.jobId); + var message = new $root.google.cloud.dataproc.v1.BatchOperationMetadata(); + if (object.batch != null) + message.batch = String(object.batch); + if (object.batchUuid != null) + message.batchUuid = String(object.batchUuid); + if (object.createTime != null) { + if (typeof object.createTime !== "object") + throw TypeError(".google.cloud.dataproc.v1.BatchOperationMetadata.createTime: object expected"); + message.createTime = $root.google.protobuf.Timestamp.fromObject(object.createTime); + } + if (object.doneTime != null) { + if (typeof object.doneTime !== "object") + throw TypeError(".google.cloud.dataproc.v1.BatchOperationMetadata.doneTime: object expected"); + message.doneTime = $root.google.protobuf.Timestamp.fromObject(object.doneTime); + } + switch (object.operationType) { + default: + if (typeof object.operationType === "number") { + message.operationType = object.operationType; + break; + } + break; + case "BATCH_OPERATION_TYPE_UNSPECIFIED": + case 0: + message.operationType = 0; + break; + case "BATCH": + case 1: + message.operationType 
= 1; + break; + } + if (object.description != null) + message.description = String(object.description); + if (object.labels) { + if (typeof object.labels !== "object") + throw TypeError(".google.cloud.dataproc.v1.BatchOperationMetadata.labels: object expected"); + message.labels = {}; + for (var keys = Object.keys(object.labels), i = 0; i < keys.length; ++i) + message.labels[keys[i]] = String(object.labels[keys[i]]); + } + if (object.warnings) { + if (!Array.isArray(object.warnings)) + throw TypeError(".google.cloud.dataproc.v1.BatchOperationMetadata.warnings: array expected"); + message.warnings = []; + for (var i = 0; i < object.warnings.length; ++i) + message.warnings[i] = String(object.warnings[i]); + } return message; }; /** - * Creates a plain object from a DeleteJobRequest message. Also converts values to other types if specified. + * Creates a plain object from a BatchOperationMetadata message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @static - * @param {google.cloud.dataproc.v1.DeleteJobRequest} message DeleteJobRequest + * @param {google.cloud.dataproc.v1.BatchOperationMetadata} message BatchOperationMetadata * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteJobRequest.toObject = function toObject(message, options) { + BatchOperationMetadata.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; + if (options.arrays || options.defaults) + object.warnings = []; + if (options.objects || options.defaults) + object.labels = {}; if (options.defaults) { - object.projectId = ""; - object.jobId = ""; - object.region = ""; + object.batch = ""; + object.batchUuid = ""; + object.createTime = null; + object.doneTime = null; + object.operationType = options.enums === String ? 
"BATCH_OPERATION_TYPE_UNSPECIFIED" : 0; + object.description = ""; + } + if (message.batch != null && message.hasOwnProperty("batch")) + object.batch = message.batch; + if (message.batchUuid != null && message.hasOwnProperty("batchUuid")) + object.batchUuid = message.batchUuid; + if (message.createTime != null && message.hasOwnProperty("createTime")) + object.createTime = $root.google.protobuf.Timestamp.toObject(message.createTime, options); + if (message.doneTime != null && message.hasOwnProperty("doneTime")) + object.doneTime = $root.google.protobuf.Timestamp.toObject(message.doneTime, options); + if (message.operationType != null && message.hasOwnProperty("operationType")) + object.operationType = options.enums === String ? $root.google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType[message.operationType] === undefined ? message.operationType : $root.google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType[message.operationType] : message.operationType; + if (message.description != null && message.hasOwnProperty("description")) + object.description = message.description; + var keys2; + if (message.labels && (keys2 = Object.keys(message.labels)).length) { + object.labels = {}; + for (var j = 0; j < keys2.length; ++j) + object.labels[keys2[j]] = message.labels[keys2[j]]; + } + if (message.warnings && message.warnings.length) { + object.warnings = []; + for (var j = 0; j < message.warnings.length; ++j) + object.warnings[j] = message.warnings[j]; } - if (message.projectId != null && message.hasOwnProperty("projectId")) - object.projectId = message.projectId; - if (message.jobId != null && message.hasOwnProperty("jobId")) - object.jobId = message.jobId; - if (message.region != null && message.hasOwnProperty("region")) - object.region = message.region; return object; }; /** - * Converts this DeleteJobRequest to JSON. + * Converts this BatchOperationMetadata to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @instance * @returns {Object.} JSON object */ - DeleteJobRequest.prototype.toJSON = function toJSON() { + BatchOperationMetadata.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteJobRequest + * Gets the default type url for BatchOperationMetadata * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.DeleteJobRequest + * @memberof google.cloud.dataproc.v1.BatchOperationMetadata * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteJobRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + BatchOperationMetadata.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.DeleteJobRequest"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.BatchOperationMetadata"; }; - return DeleteJobRequest; - })(); - - v1.BatchOperationMetadata = (function() { - - /** - * Properties of a BatchOperationMetadata. 
- * @memberof google.cloud.dataproc.v1 - * @interface IBatchOperationMetadata - * @property {string|null} [batch] BatchOperationMetadata batch - * @property {string|null} [batchUuid] BatchOperationMetadata batchUuid - * @property {google.protobuf.ITimestamp|null} [createTime] BatchOperationMetadata createTime - * @property {google.protobuf.ITimestamp|null} [doneTime] BatchOperationMetadata doneTime - * @property {google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType|null} [operationType] BatchOperationMetadata operationType - * @property {string|null} [description] BatchOperationMetadata description - * @property {Object.|null} [labels] BatchOperationMetadata labels - * @property {Array.|null} [warnings] BatchOperationMetadata warnings - */ - /** - * Constructs a new BatchOperationMetadata. - * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a BatchOperationMetadata. - * @implements IBatchOperationMetadata - * @constructor - * @param {google.cloud.dataproc.v1.IBatchOperationMetadata=} [properties] Properties to set + * BatchOperationType enum. + * @name google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType + * @enum {number} + * @property {number} BATCH_OPERATION_TYPE_UNSPECIFIED=0 BATCH_OPERATION_TYPE_UNSPECIFIED value + * @property {number} BATCH=1 BATCH value */ - function BatchOperationMetadata(properties) { - this.labels = {}; - this.warnings = []; - if (properties) - for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + BatchOperationMetadata.BatchOperationType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "BATCH_OPERATION_TYPE_UNSPECIFIED"] = 0; + values[valuesById[1] = "BATCH"] = 1; + return values; + })(); - /** - * BatchOperationMetadata batch. 
- * @member {string} batch - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata - * @instance - */ - BatchOperationMetadata.prototype.batch = ""; + return BatchOperationMetadata; + })(); - /** - * BatchOperationMetadata batchUuid. - * @member {string} batchUuid - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata - * @instance - */ - BatchOperationMetadata.prototype.batchUuid = ""; + v1.ClusterOperationStatus = (function() { /** - * BatchOperationMetadata createTime. - * @member {google.protobuf.ITimestamp|null|undefined} createTime - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata - * @instance + * Properties of a ClusterOperationStatus. + * @memberof google.cloud.dataproc.v1 + * @interface IClusterOperationStatus + * @property {google.cloud.dataproc.v1.ClusterOperationStatus.State|null} [state] ClusterOperationStatus state + * @property {string|null} [innerState] ClusterOperationStatus innerState + * @property {string|null} [details] ClusterOperationStatus details + * @property {google.protobuf.ITimestamp|null} [stateStartTime] ClusterOperationStatus stateStartTime */ - BatchOperationMetadata.prototype.createTime = null; /** - * BatchOperationMetadata doneTime. - * @member {google.protobuf.ITimestamp|null|undefined} doneTime - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata - * @instance + * Constructs a new ClusterOperationStatus. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a ClusterOperationStatus. + * @implements IClusterOperationStatus + * @constructor + * @param {google.cloud.dataproc.v1.IClusterOperationStatus=} [properties] Properties to set */ - BatchOperationMetadata.prototype.doneTime = null; + function ClusterOperationStatus(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } /** - * BatchOperationMetadata operationType. 
- * @member {google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType} operationType - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * ClusterOperationStatus state. + * @member {google.cloud.dataproc.v1.ClusterOperationStatus.State} state + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @instance */ - BatchOperationMetadata.prototype.operationType = 0; + ClusterOperationStatus.prototype.state = 0; /** - * BatchOperationMetadata description. - * @member {string} description - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * ClusterOperationStatus innerState. + * @member {string} innerState + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @instance */ - BatchOperationMetadata.prototype.description = ""; + ClusterOperationStatus.prototype.innerState = ""; /** - * BatchOperationMetadata labels. - * @member {Object.} labels - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * ClusterOperationStatus details. + * @member {string} details + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @instance */ - BatchOperationMetadata.prototype.labels = $util.emptyObject; + ClusterOperationStatus.prototype.details = ""; /** - * BatchOperationMetadata warnings. - * @member {Array.} warnings - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * ClusterOperationStatus stateStartTime. + * @member {google.protobuf.ITimestamp|null|undefined} stateStartTime + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @instance */ - BatchOperationMetadata.prototype.warnings = $util.emptyArray; + ClusterOperationStatus.prototype.stateStartTime = null; /** - * Creates a new BatchOperationMetadata instance using the specified properties. + * Creates a new ClusterOperationStatus instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @static - * @param {google.cloud.dataproc.v1.IBatchOperationMetadata=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.BatchOperationMetadata} BatchOperationMetadata instance + * @param {google.cloud.dataproc.v1.IClusterOperationStatus=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.ClusterOperationStatus} ClusterOperationStatus instance */ - BatchOperationMetadata.create = function create(properties) { - return new BatchOperationMetadata(properties); + ClusterOperationStatus.create = function create(properties) { + return new ClusterOperationStatus(properties); }; /** - * Encodes the specified BatchOperationMetadata message. Does not implicitly {@link google.cloud.dataproc.v1.BatchOperationMetadata.verify|verify} messages. + * Encodes the specified ClusterOperationStatus message. Does not implicitly {@link google.cloud.dataproc.v1.ClusterOperationStatus.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @static - * @param {google.cloud.dataproc.v1.IBatchOperationMetadata} message BatchOperationMetadata message or plain object to encode + * @param {google.cloud.dataproc.v1.IClusterOperationStatus} message ClusterOperationStatus message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BatchOperationMetadata.encode = function encode(message, writer) { + ClusterOperationStatus.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.batch != null && Object.hasOwnProperty.call(message, "batch")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.batch); - if (message.batchUuid != null && Object.hasOwnProperty.call(message, "batchUuid")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.batchUuid); - if (message.createTime != null && Object.hasOwnProperty.call(message, "createTime")) - $root.google.protobuf.Timestamp.encode(message.createTime, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.doneTime != null && Object.hasOwnProperty.call(message, "doneTime")) - $root.google.protobuf.Timestamp.encode(message.doneTime, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.operationType != null && Object.hasOwnProperty.call(message, "operationType")) - writer.uint32(/* id 6, wireType 0 =*/48).int32(message.operationType); - if (message.description != null && Object.hasOwnProperty.call(message, "description")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.description); - if (message.labels != null && Object.hasOwnProperty.call(message, "labels")) - for (var keys = Object.keys(message.labels), i = 0; i < keys.length; ++i) - writer.uint32(/* id 8, wireType 2 =*/66).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 
=*/18).string(message.labels[keys[i]]).ldelim(); - if (message.warnings != null && message.warnings.length) - for (var i = 0; i < message.warnings.length; ++i) - writer.uint32(/* id 9, wireType 2 =*/74).string(message.warnings[i]); + if (message.state != null && Object.hasOwnProperty.call(message, "state")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.state); + if (message.innerState != null && Object.hasOwnProperty.call(message, "innerState")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.innerState); + if (message.details != null && Object.hasOwnProperty.call(message, "details")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.details); + if (message.stateStartTime != null && Object.hasOwnProperty.call(message, "stateStartTime")) + $root.google.protobuf.Timestamp.encode(message.stateStartTime, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified BatchOperationMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.BatchOperationMetadata.verify|verify} messages. + * Encodes the specified ClusterOperationStatus message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ClusterOperationStatus.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @static - * @param {google.cloud.dataproc.v1.IBatchOperationMetadata} message BatchOperationMetadata message or plain object to encode + * @param {google.cloud.dataproc.v1.IClusterOperationStatus} message ClusterOperationStatus message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BatchOperationMetadata.encodeDelimited = function encodeDelimited(message, writer) { + ClusterOperationStatus.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BatchOperationMetadata message from the specified reader or buffer. + * Decodes a ClusterOperationStatus message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.BatchOperationMetadata} BatchOperationMetadata + * @returns {google.cloud.dataproc.v1.ClusterOperationStatus} ClusterOperationStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BatchOperationMetadata.decode = function decode(reader, length) { + ClusterOperationStatus.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.BatchOperationMetadata(), key, value; + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ClusterOperationStatus(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.batch = reader.string(); + message.state = reader.int32(); break; } case 2: { - message.batchUuid = reader.string(); + message.innerState = reader.string(); break; } case 3: { - message.createTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + message.details = reader.string(); break; } case 4: { - message.doneTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); - break; - } - case 6: { - message.operationType = reader.int32(); - break; - } - case 7: { - message.description = reader.string(); - break; - } - case 8: { - if (message.labels === $util.emptyObject) - message.labels = {}; - var end2 = reader.uint32() + reader.pos; - key = ""; - value = ""; - while (reader.pos < end2) { - var tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = reader.string(); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.labels[key] = value; - break; - } - case 9: { - if (!(message.warnings && message.warnings.length)) - message.warnings = []; - message.warnings.push(reader.string()); + message.stateStartTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); break; } default: @@ -31403,253 +33576,209 @@ }; /** - * Decodes a BatchOperationMetadata message from the specified reader or buffer, length delimited. + * Decodes a ClusterOperationStatus message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.BatchOperationMetadata} BatchOperationMetadata + * @returns {google.cloud.dataproc.v1.ClusterOperationStatus} ClusterOperationStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BatchOperationMetadata.decodeDelimited = function decodeDelimited(reader) { + ClusterOperationStatus.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BatchOperationMetadata message. + * Verifies a ClusterOperationStatus message. * @function verify - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BatchOperationMetadata.verify = function verify(message) { + ClusterOperationStatus.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.batch != null && message.hasOwnProperty("batch")) - if (!$util.isString(message.batch)) - return "batch: string expected"; - if (message.batchUuid != null && message.hasOwnProperty("batchUuid")) - if (!$util.isString(message.batchUuid)) - return "batchUuid: string expected"; - if (message.createTime != null && message.hasOwnProperty("createTime")) { - var error = $root.google.protobuf.Timestamp.verify(message.createTime); - if (error) - return "createTime." 
+ error; - } - if (message.doneTime != null && message.hasOwnProperty("doneTime")) { - var error = $root.google.protobuf.Timestamp.verify(message.doneTime); - if (error) - return "doneTime." + error; - } - if (message.operationType != null && message.hasOwnProperty("operationType")) - switch (message.operationType) { + if (message.state != null && message.hasOwnProperty("state")) + switch (message.state) { default: - return "operationType: enum value expected"; + return "state: enum value expected"; case 0: case 1: + case 2: + case 3: break; } - if (message.description != null && message.hasOwnProperty("description")) - if (!$util.isString(message.description)) - return "description: string expected"; - if (message.labels != null && message.hasOwnProperty("labels")) { - if (!$util.isObject(message.labels)) - return "labels: object expected"; - var key = Object.keys(message.labels); - for (var i = 0; i < key.length; ++i) - if (!$util.isString(message.labels[key[i]])) - return "labels: string{k:string} expected"; - } - if (message.warnings != null && message.hasOwnProperty("warnings")) { - if (!Array.isArray(message.warnings)) - return "warnings: array expected"; - for (var i = 0; i < message.warnings.length; ++i) - if (!$util.isString(message.warnings[i])) - return "warnings: string[] expected"; + if (message.innerState != null && message.hasOwnProperty("innerState")) + if (!$util.isString(message.innerState)) + return "innerState: string expected"; + if (message.details != null && message.hasOwnProperty("details")) + if (!$util.isString(message.details)) + return "details: string expected"; + if (message.stateStartTime != null && message.hasOwnProperty("stateStartTime")) { + var error = $root.google.protobuf.Timestamp.verify(message.stateStartTime); + if (error) + return "stateStartTime." + error; } return null; }; /** - * Creates a BatchOperationMetadata message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata - * @static - * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.BatchOperationMetadata} BatchOperationMetadata - */ - BatchOperationMetadata.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.BatchOperationMetadata) - return object; - var message = new $root.google.cloud.dataproc.v1.BatchOperationMetadata(); - if (object.batch != null) - message.batch = String(object.batch); - if (object.batchUuid != null) - message.batchUuid = String(object.batchUuid); - if (object.createTime != null) { - if (typeof object.createTime !== "object") - throw TypeError(".google.cloud.dataproc.v1.BatchOperationMetadata.createTime: object expected"); - message.createTime = $root.google.protobuf.Timestamp.fromObject(object.createTime); - } - if (object.doneTime != null) { - if (typeof object.doneTime !== "object") - throw TypeError(".google.cloud.dataproc.v1.BatchOperationMetadata.doneTime: object expected"); - message.doneTime = $root.google.protobuf.Timestamp.fromObject(object.doneTime); - } - switch (object.operationType) { + * Creates a ClusterOperationStatus message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1.ClusterOperationStatus} ClusterOperationStatus + */ + ClusterOperationStatus.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.ClusterOperationStatus) + return object; + var message = new $root.google.cloud.dataproc.v1.ClusterOperationStatus(); + switch (object.state) { default: - if (typeof object.operationType === "number") { - message.operationType = object.operationType; + if (typeof object.state === "number") { + message.state = object.state; break; } break; - case "BATCH_OPERATION_TYPE_UNSPECIFIED": + case "UNKNOWN": case 0: - message.operationType = 0; + message.state = 0; break; - case "BATCH": + case "PENDING": case 1: - message.operationType = 1; + message.state = 1; + break; + case "RUNNING": + case 2: + message.state = 2; + break; + case "DONE": + case 3: + message.state = 3; break; } - if (object.description != null) - message.description = String(object.description); - if (object.labels) { - if (typeof object.labels !== "object") - throw TypeError(".google.cloud.dataproc.v1.BatchOperationMetadata.labels: object expected"); - message.labels = {}; - for (var keys = Object.keys(object.labels), i = 0; i < keys.length; ++i) - message.labels[keys[i]] = String(object.labels[keys[i]]); - } - if (object.warnings) { - if (!Array.isArray(object.warnings)) - throw TypeError(".google.cloud.dataproc.v1.BatchOperationMetadata.warnings: array expected"); - message.warnings = []; - for (var i = 0; i < object.warnings.length; ++i) - message.warnings[i] = String(object.warnings[i]); + if (object.innerState != null) + message.innerState = String(object.innerState); + if (object.details != null) + message.details = String(object.details); + if (object.stateStartTime != null) { + if (typeof object.stateStartTime !== "object") + throw 
TypeError(".google.cloud.dataproc.v1.ClusterOperationStatus.stateStartTime: object expected"); + message.stateStartTime = $root.google.protobuf.Timestamp.fromObject(object.stateStartTime); } return message; }; /** - * Creates a plain object from a BatchOperationMetadata message. Also converts values to other types if specified. + * Creates a plain object from a ClusterOperationStatus message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @static - * @param {google.cloud.dataproc.v1.BatchOperationMetadata} message BatchOperationMetadata + * @param {google.cloud.dataproc.v1.ClusterOperationStatus} message ClusterOperationStatus * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BatchOperationMetadata.toObject = function toObject(message, options) { + ClusterOperationStatus.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) - object.warnings = []; - if (options.objects || options.defaults) - object.labels = {}; if (options.defaults) { - object.batch = ""; - object.batchUuid = ""; - object.createTime = null; - object.doneTime = null; - object.operationType = options.enums === String ? 
"BATCH_OPERATION_TYPE_UNSPECIFIED" : 0; - object.description = ""; - } - if (message.batch != null && message.hasOwnProperty("batch")) - object.batch = message.batch; - if (message.batchUuid != null && message.hasOwnProperty("batchUuid")) - object.batchUuid = message.batchUuid; - if (message.createTime != null && message.hasOwnProperty("createTime")) - object.createTime = $root.google.protobuf.Timestamp.toObject(message.createTime, options); - if (message.doneTime != null && message.hasOwnProperty("doneTime")) - object.doneTime = $root.google.protobuf.Timestamp.toObject(message.doneTime, options); - if (message.operationType != null && message.hasOwnProperty("operationType")) - object.operationType = options.enums === String ? $root.google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType[message.operationType] === undefined ? message.operationType : $root.google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType[message.operationType] : message.operationType; - if (message.description != null && message.hasOwnProperty("description")) - object.description = message.description; - var keys2; - if (message.labels && (keys2 = Object.keys(message.labels)).length) { - object.labels = {}; - for (var j = 0; j < keys2.length; ++j) - object.labels[keys2[j]] = message.labels[keys2[j]]; - } - if (message.warnings && message.warnings.length) { - object.warnings = []; - for (var j = 0; j < message.warnings.length; ++j) - object.warnings[j] = message.warnings[j]; + object.state = options.enums === String ? "UNKNOWN" : 0; + object.innerState = ""; + object.details = ""; + object.stateStartTime = null; } + if (message.state != null && message.hasOwnProperty("state")) + object.state = options.enums === String ? $root.google.cloud.dataproc.v1.ClusterOperationStatus.State[message.state] === undefined ? 
message.state : $root.google.cloud.dataproc.v1.ClusterOperationStatus.State[message.state] : message.state; + if (message.innerState != null && message.hasOwnProperty("innerState")) + object.innerState = message.innerState; + if (message.details != null && message.hasOwnProperty("details")) + object.details = message.details; + if (message.stateStartTime != null && message.hasOwnProperty("stateStartTime")) + object.stateStartTime = $root.google.protobuf.Timestamp.toObject(message.stateStartTime, options); return object; }; /** - * Converts this BatchOperationMetadata to JSON. + * Converts this ClusterOperationStatus to JSON. * @function toJSON - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @instance * @returns {Object.} JSON object */ - BatchOperationMetadata.prototype.toJSON = function toJSON() { + ClusterOperationStatus.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BatchOperationMetadata + * Gets the default type url for ClusterOperationStatus * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.BatchOperationMetadata + * @memberof google.cloud.dataproc.v1.ClusterOperationStatus * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BatchOperationMetadata.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ClusterOperationStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.BatchOperationMetadata"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.ClusterOperationStatus"; }; /** - * BatchOperationType enum. - * @name google.cloud.dataproc.v1.BatchOperationMetadata.BatchOperationType + * State enum. 
+ * @name google.cloud.dataproc.v1.ClusterOperationStatus.State * @enum {number} - * @property {number} BATCH_OPERATION_TYPE_UNSPECIFIED=0 BATCH_OPERATION_TYPE_UNSPECIFIED value - * @property {number} BATCH=1 BATCH value + * @property {number} UNKNOWN=0 UNKNOWN value + * @property {number} PENDING=1 PENDING value + * @property {number} RUNNING=2 RUNNING value + * @property {number} DONE=3 DONE value */ - BatchOperationMetadata.BatchOperationType = (function() { + ClusterOperationStatus.State = (function() { var valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "BATCH_OPERATION_TYPE_UNSPECIFIED"] = 0; - values[valuesById[1] = "BATCH"] = 1; + values[valuesById[0] = "UNKNOWN"] = 0; + values[valuesById[1] = "PENDING"] = 1; + values[valuesById[2] = "RUNNING"] = 2; + values[valuesById[3] = "DONE"] = 3; return values; })(); - return BatchOperationMetadata; + return ClusterOperationStatus; })(); - v1.ClusterOperationStatus = (function() { + v1.ClusterOperationMetadata = (function() { /** - * Properties of a ClusterOperationStatus. + * Properties of a ClusterOperationMetadata. 
* @memberof google.cloud.dataproc.v1 - * @interface IClusterOperationStatus - * @property {google.cloud.dataproc.v1.ClusterOperationStatus.State|null} [state] ClusterOperationStatus state - * @property {string|null} [innerState] ClusterOperationStatus innerState - * @property {string|null} [details] ClusterOperationStatus details - * @property {google.protobuf.ITimestamp|null} [stateStartTime] ClusterOperationStatus stateStartTime + * @interface IClusterOperationMetadata + * @property {string|null} [clusterName] ClusterOperationMetadata clusterName + * @property {string|null} [clusterUuid] ClusterOperationMetadata clusterUuid + * @property {google.cloud.dataproc.v1.IClusterOperationStatus|null} [status] ClusterOperationMetadata status + * @property {Array.|null} [statusHistory] ClusterOperationMetadata statusHistory + * @property {string|null} [operationType] ClusterOperationMetadata operationType + * @property {string|null} [description] ClusterOperationMetadata description + * @property {Object.|null} [labels] ClusterOperationMetadata labels + * @property {Array.|null} [warnings] ClusterOperationMetadata warnings */ /** - * Constructs a new ClusterOperationStatus. + * Constructs a new ClusterOperationMetadata. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a ClusterOperationStatus. - * @implements IClusterOperationStatus + * @classdesc Represents a ClusterOperationMetadata. 
+ * @implements IClusterOperationMetadata * @constructor - * @param {google.cloud.dataproc.v1.IClusterOperationStatus=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.IClusterOperationMetadata=} [properties] Properties to set */ - function ClusterOperationStatus(properties) { + function ClusterOperationMetadata(properties) { + this.statusHistory = []; + this.labels = {}; + this.warnings = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -31657,117 +33786,199 @@ } /** - * ClusterOperationStatus state. - * @member {google.cloud.dataproc.v1.ClusterOperationStatus.State} state - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * ClusterOperationMetadata clusterName. + * @member {string} clusterName + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @instance */ - ClusterOperationStatus.prototype.state = 0; + ClusterOperationMetadata.prototype.clusterName = ""; /** - * ClusterOperationStatus innerState. - * @member {string} innerState - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * ClusterOperationMetadata clusterUuid. + * @member {string} clusterUuid + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @instance */ - ClusterOperationStatus.prototype.innerState = ""; + ClusterOperationMetadata.prototype.clusterUuid = ""; /** - * ClusterOperationStatus details. - * @member {string} details - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * ClusterOperationMetadata status. + * @member {google.cloud.dataproc.v1.IClusterOperationStatus|null|undefined} status + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @instance */ - ClusterOperationStatus.prototype.details = ""; + ClusterOperationMetadata.prototype.status = null; /** - * ClusterOperationStatus stateStartTime. 
- * @member {google.protobuf.ITimestamp|null|undefined} stateStartTime - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * ClusterOperationMetadata statusHistory. + * @member {Array.} statusHistory + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @instance */ - ClusterOperationStatus.prototype.stateStartTime = null; + ClusterOperationMetadata.prototype.statusHistory = $util.emptyArray; /** - * Creates a new ClusterOperationStatus instance using the specified properties. + * ClusterOperationMetadata operationType. + * @member {string} operationType + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @instance + */ + ClusterOperationMetadata.prototype.operationType = ""; + + /** + * ClusterOperationMetadata description. + * @member {string} description + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @instance + */ + ClusterOperationMetadata.prototype.description = ""; + + /** + * ClusterOperationMetadata labels. + * @member {Object.} labels + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @instance + */ + ClusterOperationMetadata.prototype.labels = $util.emptyObject; + + /** + * ClusterOperationMetadata warnings. + * @member {Array.} warnings + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @instance + */ + ClusterOperationMetadata.prototype.warnings = $util.emptyArray; + + /** + * Creates a new ClusterOperationMetadata instance using the specified properties. 
* @function create - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @static - * @param {google.cloud.dataproc.v1.IClusterOperationStatus=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.ClusterOperationStatus} ClusterOperationStatus instance + * @param {google.cloud.dataproc.v1.IClusterOperationMetadata=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.ClusterOperationMetadata} ClusterOperationMetadata instance */ - ClusterOperationStatus.create = function create(properties) { - return new ClusterOperationStatus(properties); + ClusterOperationMetadata.create = function create(properties) { + return new ClusterOperationMetadata(properties); }; /** - * Encodes the specified ClusterOperationStatus message. Does not implicitly {@link google.cloud.dataproc.v1.ClusterOperationStatus.verify|verify} messages. + * Encodes the specified ClusterOperationMetadata message. Does not implicitly {@link google.cloud.dataproc.v1.ClusterOperationMetadata.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @static - * @param {google.cloud.dataproc.v1.IClusterOperationStatus} message ClusterOperationStatus message or plain object to encode + * @param {google.cloud.dataproc.v1.IClusterOperationMetadata} message ClusterOperationMetadata message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ClusterOperationStatus.encode = function encode(message, writer) { + ClusterOperationMetadata.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.state != null && Object.hasOwnProperty.call(message, "state")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.state); - if (message.innerState != null && Object.hasOwnProperty.call(message, "innerState")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.innerState); - if (message.details != null && Object.hasOwnProperty.call(message, "details")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.details); - if (message.stateStartTime != null && Object.hasOwnProperty.call(message, "stateStartTime")) - $root.google.protobuf.Timestamp.encode(message.stateStartTime, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.clusterName); + if (message.clusterUuid != null && Object.hasOwnProperty.call(message, "clusterUuid")) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.clusterUuid); + if (message.status != null && Object.hasOwnProperty.call(message, "status")) + $root.google.cloud.dataproc.v1.ClusterOperationStatus.encode(message.status, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); + if (message.statusHistory != null && message.statusHistory.length) + for (var i = 0; i < 
message.statusHistory.length; ++i) + $root.google.cloud.dataproc.v1.ClusterOperationStatus.encode(message.statusHistory[i], writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); + if (message.operationType != null && Object.hasOwnProperty.call(message, "operationType")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.operationType); + if (message.description != null && Object.hasOwnProperty.call(message, "description")) + writer.uint32(/* id 12, wireType 2 =*/98).string(message.description); + if (message.labels != null && Object.hasOwnProperty.call(message, "labels")) + for (var keys = Object.keys(message.labels), i = 0; i < keys.length; ++i) + writer.uint32(/* id 13, wireType 2 =*/106).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.labels[keys[i]]).ldelim(); + if (message.warnings != null && message.warnings.length) + for (var i = 0; i < message.warnings.length; ++i) + writer.uint32(/* id 14, wireType 2 =*/114).string(message.warnings[i]); return writer; }; /** - * Encodes the specified ClusterOperationStatus message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ClusterOperationStatus.verify|verify} messages. + * Encodes the specified ClusterOperationMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ClusterOperationMetadata.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @static - * @param {google.cloud.dataproc.v1.IClusterOperationStatus} message ClusterOperationStatus message or plain object to encode + * @param {google.cloud.dataproc.v1.IClusterOperationMetadata} message ClusterOperationMetadata message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ClusterOperationStatus.encodeDelimited = function encodeDelimited(message, writer) { + ClusterOperationMetadata.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ClusterOperationStatus message from the specified reader or buffer. + * Decodes a ClusterOperationMetadata message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.ClusterOperationStatus} ClusterOperationStatus + * @returns {google.cloud.dataproc.v1.ClusterOperationMetadata} ClusterOperationMetadata * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterOperationStatus.decode = function decode(reader, length) { + ClusterOperationMetadata.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ClusterOperationStatus(); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ClusterOperationMetadata(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.state = reader.int32(); + case 7: { + message.clusterName = reader.string(); break; } - case 2: { - message.innerState = reader.string(); + case 8: { + message.clusterUuid = reader.string(); break; } - case 3: { - message.details = reader.string(); + case 9: { + message.status = $root.google.cloud.dataproc.v1.ClusterOperationStatus.decode(reader, reader.uint32()); break; } - case 4: { - message.stateStartTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + case 10: { + if (!(message.statusHistory && message.statusHistory.length)) + message.statusHistory = []; + message.statusHistory.push($root.google.cloud.dataproc.v1.ClusterOperationStatus.decode(reader, reader.uint32())); + break; + } + case 11: { + message.operationType = reader.string(); + break; + } + case 12: { + message.description = reader.string(); + break; + } + case 13: { + if (message.labels === $util.emptyObject) + message.labels = {}; + var end2 = reader.uint32() + reader.pos; + key = ""; + value = ""; + while (reader.pos < end2) { + var tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.string(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.labels[key] = value; + break; + } + case 14: { + if (!(message.warnings && message.warnings.length)) + message.warnings = []; + message.warnings.push(reader.string()); break; } default: @@ -31779,206 +33990,237 @@ }; /** - * Decodes a ClusterOperationStatus message from the specified reader or buffer, length delimited. + * Decodes a ClusterOperationMetadata message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.ClusterOperationStatus} ClusterOperationStatus + * @returns {google.cloud.dataproc.v1.ClusterOperationMetadata} ClusterOperationMetadata * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterOperationStatus.decodeDelimited = function decodeDelimited(reader) { + ClusterOperationMetadata.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ClusterOperationStatus message. + * Verifies a ClusterOperationMetadata message. * @function verify - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ClusterOperationStatus.verify = function verify(message) { + ClusterOperationMetadata.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.state != null && message.hasOwnProperty("state")) - switch (message.state) { - default: - return "state: enum value expected"; - case 0: - case 1: - case 2: - case 3: - break; - } - if (message.innerState != null && message.hasOwnProperty("innerState")) - if (!$util.isString(message.innerState)) - return "innerState: string expected"; - if (message.details != null && message.hasOwnProperty("details")) - if (!$util.isString(message.details)) - return "details: string expected"; - if (message.stateStartTime != null && message.hasOwnProperty("stateStartTime")) { - 
var error = $root.google.protobuf.Timestamp.verify(message.stateStartTime); + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + if (!$util.isString(message.clusterName)) + return "clusterName: string expected"; + if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) + if (!$util.isString(message.clusterUuid)) + return "clusterUuid: string expected"; + if (message.status != null && message.hasOwnProperty("status")) { + var error = $root.google.cloud.dataproc.v1.ClusterOperationStatus.verify(message.status); if (error) - return "stateStartTime." + error; + return "status." + error; + } + if (message.statusHistory != null && message.hasOwnProperty("statusHistory")) { + if (!Array.isArray(message.statusHistory)) + return "statusHistory: array expected"; + for (var i = 0; i < message.statusHistory.length; ++i) { + var error = $root.google.cloud.dataproc.v1.ClusterOperationStatus.verify(message.statusHistory[i]); + if (error) + return "statusHistory." 
+ error; + } + } + if (message.operationType != null && message.hasOwnProperty("operationType")) + if (!$util.isString(message.operationType)) + return "operationType: string expected"; + if (message.description != null && message.hasOwnProperty("description")) + if (!$util.isString(message.description)) + return "description: string expected"; + if (message.labels != null && message.hasOwnProperty("labels")) { + if (!$util.isObject(message.labels)) + return "labels: object expected"; + var key = Object.keys(message.labels); + for (var i = 0; i < key.length; ++i) + if (!$util.isString(message.labels[key[i]])) + return "labels: string{k:string} expected"; + } + if (message.warnings != null && message.hasOwnProperty("warnings")) { + if (!Array.isArray(message.warnings)) + return "warnings: array expected"; + for (var i = 0; i < message.warnings.length; ++i) + if (!$util.isString(message.warnings[i])) + return "warnings: string[] expected"; } return null; }; /** - * Creates a ClusterOperationStatus message from a plain object. Also converts values to their respective internal types. + * Creates a ClusterOperationMetadata message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.ClusterOperationStatus} ClusterOperationStatus + * @returns {google.cloud.dataproc.v1.ClusterOperationMetadata} ClusterOperationMetadata */ - ClusterOperationStatus.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.ClusterOperationStatus) + ClusterOperationMetadata.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.ClusterOperationMetadata) return object; - var message = new $root.google.cloud.dataproc.v1.ClusterOperationStatus(); - switch (object.state) { - default: - if (typeof object.state === "number") { - message.state = object.state; - break; + var message = new $root.google.cloud.dataproc.v1.ClusterOperationMetadata(); + if (object.clusterName != null) + message.clusterName = String(object.clusterName); + if (object.clusterUuid != null) + message.clusterUuid = String(object.clusterUuid); + if (object.status != null) { + if (typeof object.status !== "object") + throw TypeError(".google.cloud.dataproc.v1.ClusterOperationMetadata.status: object expected"); + message.status = $root.google.cloud.dataproc.v1.ClusterOperationStatus.fromObject(object.status); + } + if (object.statusHistory) { + if (!Array.isArray(object.statusHistory)) + throw TypeError(".google.cloud.dataproc.v1.ClusterOperationMetadata.statusHistory: array expected"); + message.statusHistory = []; + for (var i = 0; i < object.statusHistory.length; ++i) { + if (typeof object.statusHistory[i] !== "object") + throw TypeError(".google.cloud.dataproc.v1.ClusterOperationMetadata.statusHistory: object expected"); + message.statusHistory[i] = $root.google.cloud.dataproc.v1.ClusterOperationStatus.fromObject(object.statusHistory[i]); } - break; - case "UNKNOWN": - case 0: - 
message.state = 0; - break; - case "PENDING": - case 1: - message.state = 1; - break; - case "RUNNING": - case 2: - message.state = 2; - break; - case "DONE": - case 3: - message.state = 3; - break; } - if (object.innerState != null) - message.innerState = String(object.innerState); - if (object.details != null) - message.details = String(object.details); - if (object.stateStartTime != null) { - if (typeof object.stateStartTime !== "object") - throw TypeError(".google.cloud.dataproc.v1.ClusterOperationStatus.stateStartTime: object expected"); - message.stateStartTime = $root.google.protobuf.Timestamp.fromObject(object.stateStartTime); + if (object.operationType != null) + message.operationType = String(object.operationType); + if (object.description != null) + message.description = String(object.description); + if (object.labels) { + if (typeof object.labels !== "object") + throw TypeError(".google.cloud.dataproc.v1.ClusterOperationMetadata.labels: object expected"); + message.labels = {}; + for (var keys = Object.keys(object.labels), i = 0; i < keys.length; ++i) + message.labels[keys[i]] = String(object.labels[keys[i]]); + } + if (object.warnings) { + if (!Array.isArray(object.warnings)) + throw TypeError(".google.cloud.dataproc.v1.ClusterOperationMetadata.warnings: array expected"); + message.warnings = []; + for (var i = 0; i < object.warnings.length; ++i) + message.warnings[i] = String(object.warnings[i]); } return message; }; /** - * Creates a plain object from a ClusterOperationStatus message. Also converts values to other types if specified. + * Creates a plain object from a ClusterOperationMetadata message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @static - * @param {google.cloud.dataproc.v1.ClusterOperationStatus} message ClusterOperationStatus + * @param {google.cloud.dataproc.v1.ClusterOperationMetadata} message ClusterOperationMetadata * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ClusterOperationStatus.toObject = function toObject(message, options) { + ClusterOperationMetadata.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; + if (options.arrays || options.defaults) { + object.statusHistory = []; + object.warnings = []; + } + if (options.objects || options.defaults) + object.labels = {}; if (options.defaults) { - object.state = options.enums === String ? "UNKNOWN" : 0; - object.innerState = ""; - object.details = ""; - object.stateStartTime = null; + object.clusterName = ""; + object.clusterUuid = ""; + object.status = null; + object.operationType = ""; + object.description = ""; + } + if (message.clusterName != null && message.hasOwnProperty("clusterName")) + object.clusterName = message.clusterName; + if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) + object.clusterUuid = message.clusterUuid; + if (message.status != null && message.hasOwnProperty("status")) + object.status = $root.google.cloud.dataproc.v1.ClusterOperationStatus.toObject(message.status, options); + if (message.statusHistory && message.statusHistory.length) { + object.statusHistory = []; + for (var j = 0; j < message.statusHistory.length; ++j) + object.statusHistory[j] = $root.google.cloud.dataproc.v1.ClusterOperationStatus.toObject(message.statusHistory[j], options); + } + if (message.operationType != null && message.hasOwnProperty("operationType")) + object.operationType = message.operationType; + if (message.description != null && 
message.hasOwnProperty("description")) + object.description = message.description; + var keys2; + if (message.labels && (keys2 = Object.keys(message.labels)).length) { + object.labels = {}; + for (var j = 0; j < keys2.length; ++j) + object.labels[keys2[j]] = message.labels[keys2[j]]; + } + if (message.warnings && message.warnings.length) { + object.warnings = []; + for (var j = 0; j < message.warnings.length; ++j) + object.warnings[j] = message.warnings[j]; } - if (message.state != null && message.hasOwnProperty("state")) - object.state = options.enums === String ? $root.google.cloud.dataproc.v1.ClusterOperationStatus.State[message.state] === undefined ? message.state : $root.google.cloud.dataproc.v1.ClusterOperationStatus.State[message.state] : message.state; - if (message.innerState != null && message.hasOwnProperty("innerState")) - object.innerState = message.innerState; - if (message.details != null && message.hasOwnProperty("details")) - object.details = message.details; - if (message.stateStartTime != null && message.hasOwnProperty("stateStartTime")) - object.stateStartTime = $root.google.protobuf.Timestamp.toObject(message.stateStartTime, options); return object; }; /** - * Converts this ClusterOperationStatus to JSON. + * Converts this ClusterOperationMetadata to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @instance * @returns {Object.} JSON object */ - ClusterOperationStatus.prototype.toJSON = function toJSON() { + ClusterOperationMetadata.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ClusterOperationStatus + * Gets the default type url for ClusterOperationMetadata * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.ClusterOperationStatus + * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ClusterOperationStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ClusterOperationMetadata.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.ClusterOperationStatus"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.ClusterOperationMetadata"; }; - /** - * State enum. 
- * @name google.cloud.dataproc.v1.ClusterOperationStatus.State - * @enum {number} - * @property {number} UNKNOWN=0 UNKNOWN value - * @property {number} PENDING=1 PENDING value - * @property {number} RUNNING=2 RUNNING value - * @property {number} DONE=3 DONE value - */ - ClusterOperationStatus.State = (function() { - var valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "UNKNOWN"] = 0; - values[valuesById[1] = "PENDING"] = 1; - values[valuesById[2] = "RUNNING"] = 2; - values[valuesById[3] = "DONE"] = 3; - return values; - })(); - - return ClusterOperationStatus; + return ClusterOperationMetadata; })(); - v1.ClusterOperationMetadata = (function() { + v1.NodeGroupOperationMetadata = (function() { /** - * Properties of a ClusterOperationMetadata. + * Properties of a NodeGroupOperationMetadata. * @memberof google.cloud.dataproc.v1 - * @interface IClusterOperationMetadata - * @property {string|null} [clusterName] ClusterOperationMetadata clusterName - * @property {string|null} [clusterUuid] ClusterOperationMetadata clusterUuid - * @property {google.cloud.dataproc.v1.IClusterOperationStatus|null} [status] ClusterOperationMetadata status - * @property {Array.|null} [statusHistory] ClusterOperationMetadata statusHistory - * @property {string|null} [operationType] ClusterOperationMetadata operationType - * @property {string|null} [description] ClusterOperationMetadata description - * @property {Object.|null} [labels] ClusterOperationMetadata labels - * @property {Array.|null} [warnings] ClusterOperationMetadata warnings + * @interface INodeGroupOperationMetadata + * @property {string|null} [nodeGroupId] NodeGroupOperationMetadata nodeGroupId + * @property {string|null} [clusterUuid] NodeGroupOperationMetadata clusterUuid + * @property {google.cloud.dataproc.v1.IClusterOperationStatus|null} [status] NodeGroupOperationMetadata status + * @property {Array.|null} [statusHistory] NodeGroupOperationMetadata statusHistory + * @property 
{google.cloud.dataproc.v1.NodeGroupOperationMetadata.NodeGroupOperationType|null} [operationType] NodeGroupOperationMetadata operationType + * @property {string|null} [description] NodeGroupOperationMetadata description + * @property {Object.|null} [labels] NodeGroupOperationMetadata labels + * @property {Array.|null} [warnings] NodeGroupOperationMetadata warnings */ /** - * Constructs a new ClusterOperationMetadata. + * Constructs a new NodeGroupOperationMetadata. * @memberof google.cloud.dataproc.v1 - * @classdesc Represents a ClusterOperationMetadata. - * @implements IClusterOperationMetadata + * @classdesc Represents a NodeGroupOperationMetadata. + * @implements INodeGroupOperationMetadata * @constructor - * @param {google.cloud.dataproc.v1.IClusterOperationMetadata=} [properties] Properties to set + * @param {google.cloud.dataproc.v1.INodeGroupOperationMetadata=} [properties] Properties to set */ - function ClusterOperationMetadata(properties) { + function NodeGroupOperationMetadata(properties) { this.statusHistory = []; this.labels = {}; this.warnings = []; @@ -31989,173 +34231,173 @@ } /** - * ClusterOperationMetadata clusterName. - * @member {string} clusterName - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * NodeGroupOperationMetadata nodeGroupId. + * @member {string} nodeGroupId + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @instance */ - ClusterOperationMetadata.prototype.clusterName = ""; + NodeGroupOperationMetadata.prototype.nodeGroupId = ""; /** - * ClusterOperationMetadata clusterUuid. + * NodeGroupOperationMetadata clusterUuid. * @member {string} clusterUuid - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @instance */ - ClusterOperationMetadata.prototype.clusterUuid = ""; + NodeGroupOperationMetadata.prototype.clusterUuid = ""; /** - * ClusterOperationMetadata status. + * NodeGroupOperationMetadata status. 
* @member {google.cloud.dataproc.v1.IClusterOperationStatus|null|undefined} status - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @instance */ - ClusterOperationMetadata.prototype.status = null; + NodeGroupOperationMetadata.prototype.status = null; /** - * ClusterOperationMetadata statusHistory. + * NodeGroupOperationMetadata statusHistory. * @member {Array.} statusHistory - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @instance */ - ClusterOperationMetadata.prototype.statusHistory = $util.emptyArray; + NodeGroupOperationMetadata.prototype.statusHistory = $util.emptyArray; /** - * ClusterOperationMetadata operationType. - * @member {string} operationType - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * NodeGroupOperationMetadata operationType. + * @member {google.cloud.dataproc.v1.NodeGroupOperationMetadata.NodeGroupOperationType} operationType + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @instance */ - ClusterOperationMetadata.prototype.operationType = ""; + NodeGroupOperationMetadata.prototype.operationType = 0; /** - * ClusterOperationMetadata description. + * NodeGroupOperationMetadata description. * @member {string} description - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @instance */ - ClusterOperationMetadata.prototype.description = ""; + NodeGroupOperationMetadata.prototype.description = ""; /** - * ClusterOperationMetadata labels. + * NodeGroupOperationMetadata labels. 
* @member {Object.} labels - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @instance */ - ClusterOperationMetadata.prototype.labels = $util.emptyObject; + NodeGroupOperationMetadata.prototype.labels = $util.emptyObject; /** - * ClusterOperationMetadata warnings. + * NodeGroupOperationMetadata warnings. * @member {Array.} warnings - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @instance */ - ClusterOperationMetadata.prototype.warnings = $util.emptyArray; + NodeGroupOperationMetadata.prototype.warnings = $util.emptyArray; /** - * Creates a new ClusterOperationMetadata instance using the specified properties. + * Creates a new NodeGroupOperationMetadata instance using the specified properties. * @function create - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @static - * @param {google.cloud.dataproc.v1.IClusterOperationMetadata=} [properties] Properties to set - * @returns {google.cloud.dataproc.v1.ClusterOperationMetadata} ClusterOperationMetadata instance + * @param {google.cloud.dataproc.v1.INodeGroupOperationMetadata=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.NodeGroupOperationMetadata} NodeGroupOperationMetadata instance */ - ClusterOperationMetadata.create = function create(properties) { - return new ClusterOperationMetadata(properties); + NodeGroupOperationMetadata.create = function create(properties) { + return new NodeGroupOperationMetadata(properties); }; /** - * Encodes the specified ClusterOperationMetadata message. Does not implicitly {@link google.cloud.dataproc.v1.ClusterOperationMetadata.verify|verify} messages. + * Encodes the specified NodeGroupOperationMetadata message. Does not implicitly {@link google.cloud.dataproc.v1.NodeGroupOperationMetadata.verify|verify} messages. 
* @function encode - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @static - * @param {google.cloud.dataproc.v1.IClusterOperationMetadata} message ClusterOperationMetadata message or plain object to encode + * @param {google.cloud.dataproc.v1.INodeGroupOperationMetadata} message NodeGroupOperationMetadata message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ClusterOperationMetadata.encode = function encode(message, writer) { + NodeGroupOperationMetadata.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.clusterName != null && Object.hasOwnProperty.call(message, "clusterName")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.clusterName); + if (message.nodeGroupId != null && Object.hasOwnProperty.call(message, "nodeGroupId")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.nodeGroupId); if (message.clusterUuid != null && Object.hasOwnProperty.call(message, "clusterUuid")) - writer.uint32(/* id 8, wireType 2 =*/66).string(message.clusterUuid); + writer.uint32(/* id 2, wireType 2 =*/18).string(message.clusterUuid); if (message.status != null && Object.hasOwnProperty.call(message, "status")) - $root.google.cloud.dataproc.v1.ClusterOperationStatus.encode(message.status, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); + $root.google.cloud.dataproc.v1.ClusterOperationStatus.encode(message.status, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); if (message.statusHistory != null && message.statusHistory.length) for (var i = 0; i < message.statusHistory.length; ++i) - $root.google.cloud.dataproc.v1.ClusterOperationStatus.encode(message.statusHistory[i], writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); + $root.google.cloud.dataproc.v1.ClusterOperationStatus.encode(message.statusHistory[i], writer.uint32(/* id 4, 
wireType 2 =*/34).fork()).ldelim(); if (message.operationType != null && Object.hasOwnProperty.call(message, "operationType")) - writer.uint32(/* id 11, wireType 2 =*/90).string(message.operationType); + writer.uint32(/* id 5, wireType 0 =*/40).int32(message.operationType); if (message.description != null && Object.hasOwnProperty.call(message, "description")) - writer.uint32(/* id 12, wireType 2 =*/98).string(message.description); + writer.uint32(/* id 6, wireType 2 =*/50).string(message.description); if (message.labels != null && Object.hasOwnProperty.call(message, "labels")) for (var keys = Object.keys(message.labels), i = 0; i < keys.length; ++i) - writer.uint32(/* id 13, wireType 2 =*/106).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.labels[keys[i]]).ldelim(); + writer.uint32(/* id 7, wireType 2 =*/58).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.labels[keys[i]]).ldelim(); if (message.warnings != null && message.warnings.length) for (var i = 0; i < message.warnings.length; ++i) - writer.uint32(/* id 14, wireType 2 =*/114).string(message.warnings[i]); + writer.uint32(/* id 8, wireType 2 =*/66).string(message.warnings[i]); return writer; }; /** - * Encodes the specified ClusterOperationMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.ClusterOperationMetadata.verify|verify} messages. + * Encodes the specified NodeGroupOperationMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.NodeGroupOperationMetadata.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @static - * @param {google.cloud.dataproc.v1.IClusterOperationMetadata} message ClusterOperationMetadata message or plain object to encode + * @param {google.cloud.dataproc.v1.INodeGroupOperationMetadata} message NodeGroupOperationMetadata message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ClusterOperationMetadata.encodeDelimited = function encodeDelimited(message, writer) { + NodeGroupOperationMetadata.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ClusterOperationMetadata message from the specified reader or buffer. + * Decodes a NodeGroupOperationMetadata message from the specified reader or buffer. * @function decode - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.dataproc.v1.ClusterOperationMetadata} ClusterOperationMetadata + * @returns {google.cloud.dataproc.v1.NodeGroupOperationMetadata} NodeGroupOperationMetadata * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterOperationMetadata.decode = function decode(reader, length) { + NodeGroupOperationMetadata.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.ClusterOperationMetadata(), key, value; + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.NodeGroupOperationMetadata(), key, value; while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { - case 7: { - message.clusterName = reader.string(); + case 1: { + message.nodeGroupId = reader.string(); break; } - case 8: { + case 2: { message.clusterUuid = reader.string(); break; } - case 9: { + case 3: { message.status = $root.google.cloud.dataproc.v1.ClusterOperationStatus.decode(reader, reader.uint32()); break; } - case 10: { + case 4: { if (!(message.statusHistory && message.statusHistory.length)) message.statusHistory = []; message.statusHistory.push($root.google.cloud.dataproc.v1.ClusterOperationStatus.decode(reader, reader.uint32())); break; } - case 11: { - message.operationType = reader.string(); + case 5: { + message.operationType = reader.int32(); break; } - case 12: { + case 6: { message.description = reader.string(); break; } - case 13: { + case 7: { if (message.labels === $util.emptyObject) message.labels = {}; var end2 = reader.uint32() + reader.pos; @@ -32178,7 +34420,7 @@ message.labels[key] = value; break; } - case 14: { + case 8: { if (!(message.warnings && message.warnings.length)) message.warnings = []; message.warnings.push(reader.string()); @@ -32193,35 +34435,35 @@ }; /** - * Decodes a ClusterOperationMetadata message from the specified reader or buffer, length delimited. + * Decodes a NodeGroupOperationMetadata message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.dataproc.v1.ClusterOperationMetadata} ClusterOperationMetadata + * @returns {google.cloud.dataproc.v1.NodeGroupOperationMetadata} NodeGroupOperationMetadata * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ClusterOperationMetadata.decodeDelimited = function decodeDelimited(reader) { + NodeGroupOperationMetadata.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ClusterOperationMetadata message. + * Verifies a NodeGroupOperationMetadata message. * @function verify - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ClusterOperationMetadata.verify = function verify(message) { + NodeGroupOperationMetadata.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - if (!$util.isString(message.clusterName)) - return "clusterName: string expected"; + if (message.nodeGroupId != null && message.hasOwnProperty("nodeGroupId")) + if (!$util.isString(message.nodeGroupId)) + return "nodeGroupId: string expected"; if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) if (!$util.isString(message.clusterUuid)) return "clusterUuid: string expected"; @@ -32240,8 +34482,16 @@ } } if (message.operationType != null && 
message.hasOwnProperty("operationType")) - if (!$util.isString(message.operationType)) - return "operationType: string expected"; + switch (message.operationType) { + default: + return "operationType: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + break; + } if (message.description != null && message.hasOwnProperty("description")) if (!$util.isString(message.description)) return "description: string expected"; @@ -32264,50 +34514,76 @@ }; /** - * Creates a ClusterOperationMetadata message from a plain object. Also converts values to their respective internal types. + * Creates a NodeGroupOperationMetadata message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @static * @param {Object.} object Plain object - * @returns {google.cloud.dataproc.v1.ClusterOperationMetadata} ClusterOperationMetadata + * @returns {google.cloud.dataproc.v1.NodeGroupOperationMetadata} NodeGroupOperationMetadata */ - ClusterOperationMetadata.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.dataproc.v1.ClusterOperationMetadata) + NodeGroupOperationMetadata.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1.NodeGroupOperationMetadata) return object; - var message = new $root.google.cloud.dataproc.v1.ClusterOperationMetadata(); - if (object.clusterName != null) - message.clusterName = String(object.clusterName); + var message = new $root.google.cloud.dataproc.v1.NodeGroupOperationMetadata(); + if (object.nodeGroupId != null) + message.nodeGroupId = String(object.nodeGroupId); if (object.clusterUuid != null) message.clusterUuid = String(object.clusterUuid); if (object.status != null) { if (typeof object.status !== "object") - throw TypeError(".google.cloud.dataproc.v1.ClusterOperationMetadata.status: 
object expected"); + throw TypeError(".google.cloud.dataproc.v1.NodeGroupOperationMetadata.status: object expected"); message.status = $root.google.cloud.dataproc.v1.ClusterOperationStatus.fromObject(object.status); } if (object.statusHistory) { if (!Array.isArray(object.statusHistory)) - throw TypeError(".google.cloud.dataproc.v1.ClusterOperationMetadata.statusHistory: array expected"); + throw TypeError(".google.cloud.dataproc.v1.NodeGroupOperationMetadata.statusHistory: array expected"); message.statusHistory = []; for (var i = 0; i < object.statusHistory.length; ++i) { if (typeof object.statusHistory[i] !== "object") - throw TypeError(".google.cloud.dataproc.v1.ClusterOperationMetadata.statusHistory: object expected"); + throw TypeError(".google.cloud.dataproc.v1.NodeGroupOperationMetadata.statusHistory: object expected"); message.statusHistory[i] = $root.google.cloud.dataproc.v1.ClusterOperationStatus.fromObject(object.statusHistory[i]); } } - if (object.operationType != null) - message.operationType = String(object.operationType); + switch (object.operationType) { + default: + if (typeof object.operationType === "number") { + message.operationType = object.operationType; + break; + } + break; + case "NODE_GROUP_OPERATION_TYPE_UNSPECIFIED": + case 0: + message.operationType = 0; + break; + case "CREATE": + case 1: + message.operationType = 1; + break; + case "UPDATE": + case 2: + message.operationType = 2; + break; + case "DELETE": + case 3: + message.operationType = 3; + break; + case "RESIZE": + case 4: + message.operationType = 4; + break; + } if (object.description != null) message.description = String(object.description); if (object.labels) { if (typeof object.labels !== "object") - throw TypeError(".google.cloud.dataproc.v1.ClusterOperationMetadata.labels: object expected"); + throw TypeError(".google.cloud.dataproc.v1.NodeGroupOperationMetadata.labels: object expected"); message.labels = {}; for (var keys = Object.keys(object.labels), i = 0; i < 
keys.length; ++i) message.labels[keys[i]] = String(object.labels[keys[i]]); } if (object.warnings) { if (!Array.isArray(object.warnings)) - throw TypeError(".google.cloud.dataproc.v1.ClusterOperationMetadata.warnings: array expected"); + throw TypeError(".google.cloud.dataproc.v1.NodeGroupOperationMetadata.warnings: array expected"); message.warnings = []; for (var i = 0; i < object.warnings.length; ++i) message.warnings[i] = String(object.warnings[i]); @@ -32316,15 +34592,15 @@ }; /** - * Creates a plain object from a ClusterOperationMetadata message. Also converts values to other types if specified. + * Creates a plain object from a NodeGroupOperationMetadata message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @static - * @param {google.cloud.dataproc.v1.ClusterOperationMetadata} message ClusterOperationMetadata + * @param {google.cloud.dataproc.v1.NodeGroupOperationMetadata} message NodeGroupOperationMetadata * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ClusterOperationMetadata.toObject = function toObject(message, options) { + NodeGroupOperationMetadata.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; @@ -32335,14 +34611,14 @@ if (options.objects || options.defaults) object.labels = {}; if (options.defaults) { - object.clusterName = ""; + object.nodeGroupId = ""; object.clusterUuid = ""; object.status = null; - object.operationType = ""; + object.operationType = options.enums === String ? 
"NODE_GROUP_OPERATION_TYPE_UNSPECIFIED" : 0; object.description = ""; } - if (message.clusterName != null && message.hasOwnProperty("clusterName")) - object.clusterName = message.clusterName; + if (message.nodeGroupId != null && message.hasOwnProperty("nodeGroupId")) + object.nodeGroupId = message.nodeGroupId; if (message.clusterUuid != null && message.hasOwnProperty("clusterUuid")) object.clusterUuid = message.clusterUuid; if (message.status != null && message.hasOwnProperty("status")) @@ -32353,7 +34629,7 @@ object.statusHistory[j] = $root.google.cloud.dataproc.v1.ClusterOperationStatus.toObject(message.statusHistory[j], options); } if (message.operationType != null && message.hasOwnProperty("operationType")) - object.operationType = message.operationType; + object.operationType = options.enums === String ? $root.google.cloud.dataproc.v1.NodeGroupOperationMetadata.NodeGroupOperationType[message.operationType] === undefined ? message.operationType : $root.google.cloud.dataproc.v1.NodeGroupOperationMetadata.NodeGroupOperationType[message.operationType] : message.operationType; if (message.description != null && message.hasOwnProperty("description")) object.description = message.description; var keys2; @@ -32371,32 +34647,52 @@ }; /** - * Converts this ClusterOperationMetadata to JSON. + * Converts this NodeGroupOperationMetadata to JSON. 
* @function toJSON - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @instance * @returns {Object.} JSON object */ - ClusterOperationMetadata.prototype.toJSON = function toJSON() { + NodeGroupOperationMetadata.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ClusterOperationMetadata + * Gets the default type url for NodeGroupOperationMetadata * @function getTypeUrl - * @memberof google.cloud.dataproc.v1.ClusterOperationMetadata + * @memberof google.cloud.dataproc.v1.NodeGroupOperationMetadata * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ClusterOperationMetadata.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + NodeGroupOperationMetadata.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/google.cloud.dataproc.v1.ClusterOperationMetadata"; + return typeUrlPrefix + "/google.cloud.dataproc.v1.NodeGroupOperationMetadata"; }; - return ClusterOperationMetadata; + /** + * NodeGroupOperationType enum. 
+ * @name google.cloud.dataproc.v1.NodeGroupOperationMetadata.NodeGroupOperationType + * @enum {number} + * @property {number} NODE_GROUP_OPERATION_TYPE_UNSPECIFIED=0 NODE_GROUP_OPERATION_TYPE_UNSPECIFIED value + * @property {number} CREATE=1 CREATE value + * @property {number} UPDATE=2 UPDATE value + * @property {number} DELETE=3 DELETE value + * @property {number} RESIZE=4 RESIZE value + */ + NodeGroupOperationMetadata.NodeGroupOperationType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "NODE_GROUP_OPERATION_TYPE_UNSPECIFIED"] = 0; + values[valuesById[1] = "CREATE"] = 1; + values[valuesById[2] = "UPDATE"] = 2; + values[valuesById[3] = "DELETE"] = 3; + values[valuesById[4] = "RESIZE"] = 4; + return values; + })(); + + return NodeGroupOperationMetadata; })(); v1.WorkflowTemplateService = (function() { diff --git a/packages/google-cloud-dataproc/protos/protos.json b/packages/google-cloud-dataproc/protos/protos.json index e74226d2c80..ffc3ccc73b5 100644 --- a/packages/google-cloud-dataproc/protos/protos.json +++ b/packages/google-cloud-dataproc/protos/protos.json @@ -12,8 +12,8 @@ "java_multiple_files": true, "java_outer_classname": "WorkflowTemplatesProto", "java_package": "com.google.cloud.dataproc.v1", - "(google.api.resource_definition).type": "metastore.googleapis.com/Service", - "(google.api.resource_definition).pattern": "projects/{project}/locations/{location}/services/{service}" + "(google.api.resource_definition).type": "dataproc.googleapis.com/ClusterRegion", + "(google.api.resource_definition).pattern": "projects/{project}/regions/{region}/clusters/{cluster}" }, "nested": { "AutoscalingPolicyService": { @@ -1665,6 +1665,14 @@ "options": { "(google.api.field_behavior)": "OPTIONAL" } + }, + "auxiliaryNodeGroups": { + "rule": "repeated", + "type": "AuxiliaryNodeGroup", + "id": 25, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } } } }, @@ -2055,6 +2063,67 @@ } } }, + "AuxiliaryNodeGroup": { 
+ "fields": { + "nodeGroup": { + "type": "NodeGroup", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "nodeGroupId": { + "type": "string", + "id": 2, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + } + }, + "NodeGroup": { + "options": { + "(google.api.resource).type": "dataproc.googleapis.com/NodeGroup", + "(google.api.resource).pattern": "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}" + }, + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "roles": { + "rule": "repeated", + "type": "Role", + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "nodeGroupConfig": { + "type": "InstanceGroupConfig", + "id": 3, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + }, + "labels": { + "keyType": "string", + "type": "string", + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + }, + "nested": { + "Role": { + "values": { + "ROLE_UNSPECIFIED": 0, + "DRIVER": 1 + } + } + } + }, "NodeInitializationAction": { "fields": { "executableFile": { @@ -3718,6 +3787,31 @@ "options": { "(google.api.field_behavior)": "OUTPUT_ONLY" } + }, + "driverSchedulingConfig": { + "type": "DriverSchedulingConfig", + "id": 27, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + } + }, + "DriverSchedulingConfig": { + "fields": { + "memoryMb": { + "type": "int32", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "vcores": { + "type": "int32", + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } } } }, @@ -3998,6 +4092,165 @@ } } }, + "NodeGroupController": { + "options": { + "(google.api.default_host)": "dataproc.googleapis.com", + "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/cloud-platform" + }, + "methods": { + "CreateNodeGroup": { + "requestType": "CreateNodeGroupRequest", + "responseType": "google.longrunning.Operation", + "options": { + 
"(google.api.http).post": "/v1/{parent=projects/*/regions/*/clusters/*}/nodeGroups", + "(google.api.http).body": "node_group", + "(google.api.method_signature)": "parent,node_group,node_group_id", + "(google.longrunning.operation_info).response_type": "NodeGroup", + "(google.longrunning.operation_info).metadata_type": "google.cloud.dataproc.v1.NodeGroupOperationMetadata" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{parent=projects/*/regions/*/clusters/*}/nodeGroups", + "body": "node_group" + } + }, + { + "(google.api.method_signature)": "parent,node_group,node_group_id" + }, + { + "(google.longrunning.operation_info)": { + "response_type": "NodeGroup", + "metadata_type": "google.cloud.dataproc.v1.NodeGroupOperationMetadata" + } + } + ] + }, + "ResizeNodeGroup": { + "requestType": "ResizeNodeGroupRequest", + "responseType": "google.longrunning.Operation", + "options": { + "(google.api.http).post": "/v1/{name=projects/*/regions/*/clusters/*/nodeGroups/*}:resize", + "(google.api.http).body": "*", + "(google.api.method_signature)": "name,size", + "(google.longrunning.operation_info).response_type": "NodeGroup", + "(google.longrunning.operation_info).metadata_type": "google.cloud.dataproc.v1.NodeGroupOperationMetadata" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{name=projects/*/regions/*/clusters/*/nodeGroups/*}:resize", + "body": "*" + } + }, + { + "(google.api.method_signature)": "name,size" + }, + { + "(google.longrunning.operation_info)": { + "response_type": "NodeGroup", + "metadata_type": "google.cloud.dataproc.v1.NodeGroupOperationMetadata" + } + } + ] + }, + "GetNodeGroup": { + "requestType": "GetNodeGroupRequest", + "responseType": "NodeGroup", + "options": { + "(google.api.http).get": "/v1/{name=projects/*/regions/*/clusters/*/nodeGroups/*}", + "(google.api.method_signature)": "name" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "get": "/v1/{name=projects/*/regions/*/clusters/*/nodeGroups/*}" + } 
+ }, + { + "(google.api.method_signature)": "name" + } + ] + } + } + }, + "CreateNodeGroupRequest": { + "fields": { + "parent": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED", + "(google.api.resource_reference).child_type": "dataproc.googleapis.com/NodeGroup" + } + }, + "nodeGroup": { + "type": "NodeGroup", + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "nodeGroupId": { + "type": "string", + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + }, + "requestId": { + "type": "string", + "id": 3, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + } + }, + "ResizeNodeGroupRequest": { + "fields": { + "name": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "size": { + "type": "int32", + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "requestId": { + "type": "string", + "id": 3, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + }, + "gracefulDecommissionTimeout": { + "type": "google.protobuf.Duration", + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + } + }, + "GetNodeGroupRequest": { + "fields": { + "name": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED", + "(google.api.resource_reference).type": "dataproc.googleapis.com/NodeGroup" + } + } + } + }, "BatchOperationMetadata": { "fields": { "batch": { @@ -4149,6 +4402,77 @@ } } }, + "NodeGroupOperationMetadata": { + "fields": { + "nodeGroupId": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "clusterUuid": { + "type": "string", + "id": 2, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "status": { + "type": "ClusterOperationStatus", + "id": 3, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "statusHistory": { + "rule": 
"repeated", + "type": "ClusterOperationStatus", + "id": 4, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "operationType": { + "type": "NodeGroupOperationType", + "id": 5 + }, + "description": { + "type": "string", + "id": 6, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "labels": { + "keyType": "string", + "type": "string", + "id": 7, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "warnings": { + "rule": "repeated", + "type": "string", + "id": 8, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + } + }, + "nested": { + "NodeGroupOperationType": { + "values": { + "NODE_GROUP_OPERATION_TYPE_UNSPECIFIED": 0, + "CREATE": 1, + "UPDATE": 2, + "DELETE": 3, + "RESIZE": 4 + } + } + } + }, "WorkflowTemplateService": { "options": { "(google.api.default_host)": "dataproc.googleapis.com", diff --git a/packages/google-cloud-dataproc/samples/README.md b/packages/google-cloud-dataproc/samples/README.md index 0edb48bae92..b47386ba2df 100644 --- a/packages/google-cloud-dataproc/samples/README.md +++ b/packages/google-cloud-dataproc/samples/README.md @@ -36,6 +36,9 @@ * [Job_controller.submit_job](#job_controller.submit_job) * [Job_controller.submit_job_as_operation](#job_controller.submit_job_as_operation) * [Job_controller.update_job](#job_controller.update_job) + * [Node_group_controller.create_node_group](#node_group_controller.create_node_group) + * [Node_group_controller.get_node_group](#node_group_controller.get_node_group) + * [Node_group_controller.resize_node_group](#node_group_controller.resize_node_group) * [Workflow_template_service.create_workflow_template](#workflow_template_service.create_workflow_template) * [Workflow_template_service.delete_workflow_template](#workflow_template_service.delete_workflow_template) * [Workflow_template_service.get_workflow_template](#workflow_template_service.get_workflow_template) @@ -470,6 +473,57 @@ __Usage:__ +### 
Node_group_controller.create_node_group + +View the [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.create_node_group.js). + +[![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.create_node_group.js,samples/README.md) + +__Usage:__ + + +`node packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.create_node_group.js` + + +----- + + + + +### Node_group_controller.get_node_group + +View the [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.get_node_group.js). + +[![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.get_node_group.js,samples/README.md) + +__Usage:__ + + +`node packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.get_node_group.js` + + +----- + + + + +### Node_group_controller.resize_node_group + +View the [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.resize_node_group.js). 
+ +[![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-node&page=editor&open_in_editor=packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.resize_node_group.js,samples/README.md) + +__Usage:__ + + +`node packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.resize_node_group.js` + + +----- + + + + ### Workflow_template_service.create_workflow_template View the [source code](https://github.com/googleapis/google-cloud-node/blob/main/packages/google-cloud-dataproc/samples/generated/v1/workflow_template_service.create_workflow_template.js). diff --git a/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.create_cluster.js b/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.create_cluster.js index 8a8be5b9122..23d696ca9de 100644 --- a/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.create_cluster.js +++ b/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.create_cluster.js @@ -42,11 +42,12 @@ function main(projectId, region, cluster) { */ // const cluster = {} /** - * Optional. A unique ID used to identify the request. If the server receives two + * Optional. A unique ID used to identify the request. If the server receives + * two * CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s * with the same id, then the second request will be ignored and the - * first google.longrunning.Operation google.longrunning.Operation created and stored in the backend - * is returned. + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. * It is recommended to always set this value to a * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). 
* The ID must contain only letters (a-z, A-Z), numbers (0-9), diff --git a/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.delete_cluster.js b/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.delete_cluster.js index ee047da8c3d..0105fcb45ab 100644 --- a/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.delete_cluster.js +++ b/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.delete_cluster.js @@ -51,8 +51,8 @@ function main(projectId, region, clusterName) { * receives two * DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s * with the same id, then the second request will be ignored and the - * first google.longrunning.Operation google.longrunning.Operation created and stored in the - * backend is returned. + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. * It is recommended to always set this value to a * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). 
* The ID must contain only letters (a-z, A-Z), numbers (0-9), diff --git a/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.start_cluster.js b/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.start_cluster.js index a84afdf0e8f..b79b9108271 100644 --- a/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.start_cluster.js +++ b/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.start_cluster.js @@ -51,8 +51,8 @@ function main(projectId, region, clusterName) { * receives two * StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s * with the same id, then the second request will be ignored and the - * first google.longrunning.Operation google.longrunning.Operation created and stored in the - * backend is returned. + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. * Recommendation: Set this value to a * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). 
* The ID must contain only letters (a-z, A-Z), numbers (0-9), diff --git a/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.stop_cluster.js b/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.stop_cluster.js index 2322ef9b961..c3cf29a9dd3 100644 --- a/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.stop_cluster.js +++ b/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.stop_cluster.js @@ -51,8 +51,8 @@ function main(projectId, region, clusterName) { * receives two * StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s * with the same id, then the second request will be ignored and the - * first google.longrunning.Operation google.longrunning.Operation created and stored in the - * backend is returned. + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. * Recommendation: Set this value to a * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). 
* The ID must contain only letters (a-z, A-Z), numbers (0-9), diff --git a/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.update_cluster.js b/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.update_cluster.js index 407699f1519..13faa39caa3 100644 --- a/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.update_cluster.js +++ b/packages/google-cloud-dataproc/samples/generated/v1/cluster_controller.update_cluster.js @@ -112,8 +112,8 @@ function main(projectId, region, clusterName, cluster, updateMask) { * receives two * UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s * with the same id, then the second request will be ignored and the - * first google.longrunning.Operation google.longrunning.Operation created and stored in the - * backend is returned. + * first google.longrunning.Operation google.longrunning.Operation created + * and stored in the backend is returned. * It is recommended to always set this value to a * UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). 
* The ID must contain only letters (a-z, A-Z), numbers (0-9), diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.create_node_group.js b/packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.create_node_group.js similarity index 100% rename from owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.create_node_group.js rename to packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.create_node_group.js diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.get_node_group.js b/packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.get_node_group.js similarity index 100% rename from owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.get_node_group.js rename to packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.get_node_group.js diff --git a/owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.resize_node_group.js b/packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.resize_node_group.js similarity index 100% rename from owl-bot-staging/google-cloud-dataproc/v1/samples/generated/v1/node_group_controller.resize_node_group.js rename to packages/google-cloud-dataproc/samples/generated/v1/node_group_controller.resize_node_group.js diff --git a/packages/google-cloud-dataproc/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json b/packages/google-cloud-dataproc/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json index b989facebe0..cc0c3d7e362 100644 --- a/packages/google-cloud-dataproc/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json +++ b/packages/google-cloud-dataproc/samples/generated/v1/snippet_metadata.google.cloud.dataproc.v1.json @@ -414,7 +414,7 @@ "segments": [ { "start": 25, - "end": 81, + "end": 82, "type": "FULL" } ], @@ -1211,6 
+1211,150 @@ } } }, + { + "regionTag": "dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async", + "title": "dataproc createNodeGroup Sample", + "origin": "API_DEFINITION", + "description": " Creates a node group in a cluster. The returned [Operation.metadata][google.longrunning.Operation.metadata] is [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", + "canonical": true, + "file": "node_group_controller.create_node_group.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 80, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.CreateNodeGroup", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "node_group", + "type": ".google.cloud.dataproc.v1.NodeGroup" + }, + { + "name": "node_group_id", + "type": "TYPE_STRING" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "NodeGroupControllerClient", + "fullName": "google.cloud.dataproc.v1.NodeGroupControllerClient" + }, + "method": { + "shortName": "CreateNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.CreateNodeGroup", + "service": { + "shortName": "NodeGroupController", + "fullName": "google.cloud.dataproc.v1.NodeGroupController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async", + "title": "dataproc resizeNodeGroup Sample", + "origin": "API_DEFINITION", + "description": " Resizes a node group in a cluster. 
The returned [Operation.metadata][google.longrunning.Operation.metadata] is [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", + "canonical": true, + "file": "node_group_controller.resize_node_group.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 90, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ResizeNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.ResizeNodeGroup", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + }, + { + "name": "size", + "type": "TYPE_INT32" + }, + { + "name": "request_id", + "type": "TYPE_STRING" + }, + { + "name": "graceful_decommission_timeout", + "type": ".google.protobuf.Duration" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "NodeGroupControllerClient", + "fullName": "google.cloud.dataproc.v1.NodeGroupControllerClient" + }, + "method": { + "shortName": "ResizeNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.ResizeNodeGroup", + "service": { + "shortName": "NodeGroupController", + "fullName": "google.cloud.dataproc.v1.NodeGroupController" + } + } + } + }, + { + "regionTag": "dataproc_v1_generated_NodeGroupController_GetNodeGroup_async", + "title": "dataproc getNodeGroup Sample", + "origin": "API_DEFINITION", + "description": " Gets the resource representation for a node group in a cluster.", + "canonical": true, + "file": "node_group_controller.get_node_group.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 55, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.GetNodeGroup", + "async": true, + "parameters": [ + { + "name": "name", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.dataproc.v1.NodeGroup", + "client": { + "shortName": "NodeGroupControllerClient", + 
"fullName": "google.cloud.dataproc.v1.NodeGroupControllerClient" + }, + "method": { + "shortName": "GetNodeGroup", + "fullName": "google.cloud.dataproc.v1.NodeGroupController.GetNodeGroup", + "service": { + "shortName": "NodeGroupController", + "fullName": "google.cloud.dataproc.v1.NodeGroupController" + } + } + } + }, { "regionTag": "dataproc_v1_generated_WorkflowTemplateService_CreateWorkflowTemplate_async", "title": "dataproc createWorkflowTemplate Sample", diff --git a/packages/google-cloud-dataproc/src/index.ts b/packages/google-cloud-dataproc/src/index.ts index 927427e706c..064e6476928 100644 --- a/packages/google-cloud-dataproc/src/index.ts +++ b/packages/google-cloud-dataproc/src/index.ts @@ -26,6 +26,8 @@ const ClusterControllerClient = v1.ClusterControllerClient; type ClusterControllerClient = v1.ClusterControllerClient; const JobControllerClient = v1.JobControllerClient; type JobControllerClient = v1.JobControllerClient; +const NodeGroupControllerClient = v1.NodeGroupControllerClient; +type NodeGroupControllerClient = v1.NodeGroupControllerClient; const WorkflowTemplateServiceClient = v1.WorkflowTemplateServiceClient; type WorkflowTemplateServiceClient = v1.WorkflowTemplateServiceClient; @@ -35,6 +37,7 @@ export { BatchControllerClient, ClusterControllerClient, JobControllerClient, + NodeGroupControllerClient, WorkflowTemplateServiceClient, }; export default { @@ -43,6 +46,7 @@ export default { BatchControllerClient, ClusterControllerClient, JobControllerClient, + NodeGroupControllerClient, WorkflowTemplateServiceClient, }; import * as protos from '../protos/protos'; diff --git a/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_client.ts b/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_client.ts index 03a2347f246..5c1ee5b86b6 100644 --- a/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_client.ts +++ b/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_client.ts @@ -122,6 +122,9 @@ export 
class AutoscalingPolicyServiceClient { (typeof window !== 'undefined' && typeof window?.fetch === 'function'); opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + // Request numeric enum values if REST transport is used. + opts.numericEnums = true; + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { opts['scopes'] = staticMembers.scopes; @@ -183,6 +186,9 @@ export class AutoscalingPolicyServiceClient { locationPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}/locations/{location}' ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), projectPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}' ), @@ -1097,6 +1103,76 @@ export class AutoscalingPolicyServiceClient { return this.pathTemplates.locationPathTemplate.match(locationName).location; } + /** + * Return a fully-qualified nodeGroup resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. + */ + nodeGroupPath( + project: string, + region: string, + cluster: string, + nodeGroup: string + ) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. + */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .project; + } + + /** + * Parse the region from NodeGroup resource. 
+ * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the cluster. + */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .node_group; + } + /** * Return a fully-qualified project resource name string. 
* diff --git a/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_proto_list.json b/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_proto_list.json index b26a9be7c5f..3bb7ccf055a 100644 --- a/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_proto_list.json +++ b/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_proto_list.json @@ -3,6 +3,7 @@ "../../protos/google/cloud/dataproc/v1/batches.proto", "../../protos/google/cloud/dataproc/v1/clusters.proto", "../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", "../../protos/google/cloud/dataproc/v1/operations.proto", "../../protos/google/cloud/dataproc/v1/shared.proto", "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" diff --git a/packages/google-cloud-dataproc/src/v1/batch_controller_client.ts b/packages/google-cloud-dataproc/src/v1/batch_controller_client.ts index 8e8f33fba02..df5ab753b32 100644 --- a/packages/google-cloud-dataproc/src/v1/batch_controller_client.ts +++ b/packages/google-cloud-dataproc/src/v1/batch_controller_client.ts @@ -123,6 +123,9 @@ export class BatchControllerClient { (typeof window !== 'undefined' && typeof window?.fetch === 'function'); opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + // Request numeric enum values if REST transport is used. + opts.numericEnums = true; + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. 
if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { opts['scopes'] = staticMembers.scopes; @@ -184,6 +187,9 @@ export class BatchControllerClient { locationPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}/locations/{location}' ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), projectPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}' ), @@ -1105,6 +1111,76 @@ export class BatchControllerClient { return this.pathTemplates.locationPathTemplate.match(locationName).location; } + /** + * Return a fully-qualified nodeGroup resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. + */ + nodeGroupPath( + project: string, + region: string, + cluster: string, + nodeGroup: string + ) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. + */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .project; + } + + /** + * Parse the region from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. 
+ * @returns {string} A string representing the cluster. + */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .node_group; + } + /** * Return a fully-qualified project resource name string. * diff --git a/packages/google-cloud-dataproc/src/v1/batch_controller_proto_list.json b/packages/google-cloud-dataproc/src/v1/batch_controller_proto_list.json index b26a9be7c5f..3bb7ccf055a 100644 --- a/packages/google-cloud-dataproc/src/v1/batch_controller_proto_list.json +++ b/packages/google-cloud-dataproc/src/v1/batch_controller_proto_list.json @@ -3,6 +3,7 @@ "../../protos/google/cloud/dataproc/v1/batches.proto", "../../protos/google/cloud/dataproc/v1/clusters.proto", "../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", "../../protos/google/cloud/dataproc/v1/operations.proto", "../../protos/google/cloud/dataproc/v1/shared.proto", "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" diff --git a/packages/google-cloud-dataproc/src/v1/cluster_controller_client.ts b/packages/google-cloud-dataproc/src/v1/cluster_controller_client.ts index 9403ad69ad5..da00ad24c15 100644 --- a/packages/google-cloud-dataproc/src/v1/cluster_controller_client.ts +++ b/packages/google-cloud-dataproc/src/v1/cluster_controller_client.ts @@ -124,6 +124,9 @@ export class ClusterControllerClient { (typeof window !== 'undefined' && typeof window?.fetch === 'function'); opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + // Request numeric enum values if REST transport is 
used. + opts.numericEnums = true; + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { opts['scopes'] = staticMembers.scopes; @@ -182,6 +185,9 @@ export class ClusterControllerClient { batchPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}/locations/{location}/batches/{batch}' ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' @@ -659,11 +665,12 @@ export class ClusterControllerClient { * @param {google.cloud.dataproc.v1.Cluster} request.cluster * Required. The cluster to create. * @param {string} [request.requestId] - * Optional. A unique ID used to identify the request. If the server receives two + * Optional. A unique ID used to identify the request. If the server receives + * two * [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s * with the same id, then the second request will be ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created and stored in the backend - * is returned. + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. * * It is recommended to always set this value to a * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -808,7 +815,8 @@ export class ClusterControllerClient { * Updates a cluster in a project. 
The returned * {@link google.longrunning.Operation.metadata|Operation.metadata} will be * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - * The cluster must be in a {@link google.cloud.dataproc.v1.ClusterStatus.State|`RUNNING`} state or an error + * The cluster must be in a + * {@link google.cloud.dataproc.v1.ClusterStatus.State|`RUNNING`} state or an error * is returned. * * @param {Object} request @@ -889,8 +897,8 @@ export class ClusterControllerClient { * receives two * [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s * with the same id, then the second request will be ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created and stored in the - * backend is returned. + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. * * It is recommended to always set this value to a * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -1050,8 +1058,8 @@ export class ClusterControllerClient { * receives two * [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s * with the same id, then the second request will be ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created and stored in the - * backend is returned. + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. * * Recommendation: Set this value to a * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). 
@@ -1211,8 +1219,8 @@ export class ClusterControllerClient { * receives two * [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s * with the same id, then the second request will be ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created and stored in the - * backend is returned. + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. * * Recommendation: Set this value to a * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -1374,8 +1382,8 @@ export class ClusterControllerClient { * receives two * [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s * with the same id, then the second request will be ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created and stored in the - * backend is returned. + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. * * It is recommended to always set this value to a * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). @@ -1974,6 +1982,76 @@ export class ClusterControllerClient { return this.pathTemplates.batchPathTemplate.match(batchName).batch; } + /** + * Return a fully-qualified nodeGroup resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. 
+ */ + nodeGroupPath( + project: string, + region: string, + cluster: string, + nodeGroup: string + ) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. + */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .project; + } + + /** + * Parse the region from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the cluster. + */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .node_group; + } + /** * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. 
* diff --git a/packages/google-cloud-dataproc/src/v1/cluster_controller_proto_list.json b/packages/google-cloud-dataproc/src/v1/cluster_controller_proto_list.json index b26a9be7c5f..3bb7ccf055a 100644 --- a/packages/google-cloud-dataproc/src/v1/cluster_controller_proto_list.json +++ b/packages/google-cloud-dataproc/src/v1/cluster_controller_proto_list.json @@ -3,6 +3,7 @@ "../../protos/google/cloud/dataproc/v1/batches.proto", "../../protos/google/cloud/dataproc/v1/clusters.proto", "../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", "../../protos/google/cloud/dataproc/v1/operations.proto", "../../protos/google/cloud/dataproc/v1/shared.proto", "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" diff --git a/packages/google-cloud-dataproc/src/v1/gapic_metadata.json b/packages/google-cloud-dataproc/src/v1/gapic_metadata.json index 6f5c9ee3ed9..36b4ef4bf5d 100644 --- a/packages/google-cloud-dataproc/src/v1/gapic_metadata.json +++ b/packages/google-cloud-dataproc/src/v1/gapic_metadata.json @@ -317,6 +317,50 @@ } } }, + "NodeGroupController": { + "clients": { + "grpc": { + "libraryClient": "NodeGroupControllerClient", + "rpcs": { + "GetNodeGroup": { + "methods": [ + "getNodeGroup" + ] + }, + "CreateNodeGroup": { + "methods": [ + "createNodeGroup" + ] + }, + "ResizeNodeGroup": { + "methods": [ + "resizeNodeGroup" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "NodeGroupControllerClient", + "rpcs": { + "GetNodeGroup": { + "methods": [ + "getNodeGroup" + ] + }, + "CreateNodeGroup": { + "methods": [ + "createNodeGroup" + ] + }, + "ResizeNodeGroup": { + "methods": [ + "resizeNodeGroup" + ] + } + } + } + } + }, "WorkflowTemplateService": { "clients": { "grpc": { diff --git a/packages/google-cloud-dataproc/src/v1/index.ts b/packages/google-cloud-dataproc/src/v1/index.ts index 098c50ae61b..10f41804708 100644 --- a/packages/google-cloud-dataproc/src/v1/index.ts +++ 
b/packages/google-cloud-dataproc/src/v1/index.ts @@ -20,4 +20,5 @@ export {AutoscalingPolicyServiceClient} from './autoscaling_policy_service_clien export {BatchControllerClient} from './batch_controller_client'; export {ClusterControllerClient} from './cluster_controller_client'; export {JobControllerClient} from './job_controller_client'; +export {NodeGroupControllerClient} from './node_group_controller_client'; export {WorkflowTemplateServiceClient} from './workflow_template_service_client'; diff --git a/packages/google-cloud-dataproc/src/v1/job_controller_client.ts b/packages/google-cloud-dataproc/src/v1/job_controller_client.ts index c1eb5ddf413..2472cbfc9ed 100644 --- a/packages/google-cloud-dataproc/src/v1/job_controller_client.ts +++ b/packages/google-cloud-dataproc/src/v1/job_controller_client.ts @@ -123,6 +123,9 @@ export class JobControllerClient { (typeof window !== 'undefined' && typeof window?.fetch === 'function'); opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + // Request numeric enum values if REST transport is used. + opts.numericEnums = true; + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { opts['scopes'] = staticMembers.scopes; @@ -181,6 +184,9 @@ export class JobControllerClient { batchPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}/locations/{location}/batches/{batch}' ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' @@ -1445,6 +1451,76 @@ export class JobControllerClient { return this.pathTemplates.batchPathTemplate.match(batchName).batch; } + /** + * Return a fully-qualified nodeGroup resource name string. 
+ * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. + */ + nodeGroupPath( + project: string, + region: string, + cluster: string, + nodeGroup: string + ) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. + */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .project; + } + + /** + * Parse the region from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the cluster. + */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .node_group; + } + /** * Return a fully-qualified projectLocationAutoscalingPolicy resource name string. 
* diff --git a/packages/google-cloud-dataproc/src/v1/job_controller_proto_list.json b/packages/google-cloud-dataproc/src/v1/job_controller_proto_list.json index b26a9be7c5f..3bb7ccf055a 100644 --- a/packages/google-cloud-dataproc/src/v1/job_controller_proto_list.json +++ b/packages/google-cloud-dataproc/src/v1/job_controller_proto_list.json @@ -3,6 +3,7 @@ "../../protos/google/cloud/dataproc/v1/batches.proto", "../../protos/google/cloud/dataproc/v1/clusters.proto", "../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", "../../protos/google/cloud/dataproc/v1/operations.proto", "../../protos/google/cloud/dataproc/v1/shared.proto", "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client.ts b/packages/google-cloud-dataproc/src/v1/node_group_controller_client.ts similarity index 50% rename from owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client.ts rename to packages/google-cloud-dataproc/src/v1/node_group_controller_client.ts index adf45e861d8..28fe326bab9 100644 --- a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client.ts +++ b/packages/google-cloud-dataproc/src/v1/node_group_controller_client.ts @@ -18,7 +18,14 @@ /* global window */ import type * as gax from 'google-gax'; -import type {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; +import type { + Callback, + CallOptions, + Descriptors, + ClientOptions, + GrpcClientOptions, + LROperation, +} from 'google-gax'; import * as protos from '../../protos/protos'; import jsonProtos = require('../../protos/protos.json'); @@ -97,14 +104,22 @@ export class NodeGroupControllerClient { * const client = new NodeGroupControllerClient({fallback: 'rest'}, gax); * ``` */ - constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + constructor( + 
opts?: ClientOptions, + gaxInstance?: typeof gax | typeof gax.fallback + ) { // Ensure that options include all the required fields. const staticMembers = this.constructor as typeof NodeGroupControllerClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const servicePath = + opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!( + opts?.servicePath || opts?.apiEndpoint + ); const port = opts?.port || staticMembers.port; const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + const fallback = + opts?.fallback ?? + (typeof window !== 'undefined' && typeof window?.fetch === 'function'); opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); // Request numeric enum values if REST transport is used. @@ -130,7 +145,7 @@ export class NodeGroupControllerClient { this._opts = opts; // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + this.auth = this._gaxGrpc.auth as gax.GoogleAuth; // Set useJWTAccessWithScope on the auth object. this.auth.useJWTAccessWithScope = true; @@ -144,10 +159,7 @@ export class NodeGroupControllerClient { } // Determine the client header string. 
- const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; + const clientHeader = [`gax/${this._gaxModule.version}`, `gapic/${version}`]; if (typeof process !== 'undefined' && 'versions' in process) { clientHeader.push(`gl-node/${process.versions.node}`); } else { @@ -155,7 +167,7 @@ export class NodeGroupControllerClient { } if (!opts.fallback) { clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { + } else if (opts.fallback === 'rest') { clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); } if (opts.libName && opts.libVersion) { @@ -180,18 +192,22 @@ export class NodeGroupControllerClient { projectPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}' ), - projectLocationAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' - ), - projectLocationWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' - ), - projectRegionAutoscalingPolicyPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' - ), - projectRegionWorkflowTemplatePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' - ), + projectLocationAutoscalingPolicyPathTemplate: + new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}' + ), + projectLocationWorkflowTemplatePathTemplate: + new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/workflowTemplates/{workflow_template}' + ), + projectRegionAutoscalingPolicyPathTemplate: + new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}' + ), + projectRegionWorkflowTemplatePathTemplate: + new 
this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}' + ), regionPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}/regions/{region}' ), @@ -203,40 +219,158 @@ export class NodeGroupControllerClient { // rather than holding a request open. const lroOptions: GrpcClientOptions = { auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined + grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined, }; if (opts.fallback === 'rest') { lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = [{selector: 'google.iam.v1.IAMPolicy.GetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy',body: '*',}], - },{selector: 'google.iam.v1.IAMPolicy.SetIamPolicy',post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy',body: '*',}], - },{selector: 
'google.iam.v1.IAMPolicy.TestIamPermissions',post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions',body: '*',additional_bindings: [{post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions',body: '*',},{post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions',body: '*',}], - },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/regions/*/operations/*}:cancel',},{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/regions/*/operations/*}',},{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/regions/*/operations}',}]; + lroOptions.httpRules = [ + { + selector: 'google.iam.v1.IAMPolicy.GetIamPolicy', + post: '/v1/{resource=projects/*/regions/*/clusters/*}:getIamPolicy', + body: '*', + additional_bindings: [ + { + post: '/v1/{resource=projects/*/regions/*/jobs/*}:getIamPolicy', + body: '*', + }, + { + post: '/v1/{resource=projects/*/regions/*/operations/*}:getIamPolicy', + body: '*', + }, + { + post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:getIamPolicy', + body: '*', + }, + { + post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:getIamPolicy', + body: '*', + }, + { + post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:getIamPolicy', + body: '*', + }, + { + post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:getIamPolicy', + body: '*', + }, + ], + }, + { + selector: 
'google.iam.v1.IAMPolicy.SetIamPolicy', + post: '/v1/{resource=projects/*/regions/*/clusters/*}:setIamPolicy', + body: '*', + additional_bindings: [ + { + post: '/v1/{resource=projects/*/regions/*/jobs/*}:setIamPolicy', + body: '*', + }, + { + post: '/v1/{resource=projects/*/regions/*/operations/*}:setIamPolicy', + body: '*', + }, + { + post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:setIamPolicy', + body: '*', + }, + { + post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:setIamPolicy', + body: '*', + }, + { + post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:setIamPolicy', + body: '*', + }, + { + post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:setIamPolicy', + body: '*', + }, + ], + }, + { + selector: 'google.iam.v1.IAMPolicy.TestIamPermissions', + post: '/v1/{resource=projects/*/regions/*/clusters/*}:testIamPermissions', + body: '*', + additional_bindings: [ + { + post: '/v1/{resource=projects/*/regions/*/jobs/*}:testIamPermissions', + body: '*', + }, + { + post: '/v1/{resource=projects/*/regions/*/operations/*}:testIamPermissions', + body: '*', + }, + { + post: '/v1/{resource=projects/*/regions/*/workflowTemplates/*}:testIamPermissions', + body: '*', + }, + { + post: '/v1/{resource=projects/*/locations/*/workflowTemplates/*}:testIamPermissions', + body: '*', + }, + { + post: '/v1/{resource=projects/*/regions/*/autoscalingPolicies/*}:testIamPermissions', + body: '*', + }, + { + post: '/v1/{resource=projects/*/locations/*/autoscalingPolicies/*}:testIamPermissions', + body: '*', + }, + ], + }, + { + selector: 'google.longrunning.Operations.CancelOperation', + post: '/v1/{name=projects/*/regions/*/operations/*}:cancel', + }, + { + selector: 'google.longrunning.Operations.DeleteOperation', + delete: '/v1/{name=projects/*/regions/*/operations/*}', + }, + { + selector: 'google.longrunning.Operations.GetOperation', + get: '/v1/{name=projects/*/regions/*/operations/*}', + }, + { + selector: 
'google.longrunning.Operations.ListOperations', + get: '/v1/{name=projects/*/regions/*/operations}', + }, + ]; } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + this.operationsClient = this._gaxModule + .lro(lroOptions) + .operationsClient(opts); const createNodeGroupResponse = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.NodeGroup') as gax.protobuf.Type; + '.google.cloud.dataproc.v1.NodeGroup' + ) as gax.protobuf.Type; const createNodeGroupMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.NodeGroupOperationMetadata') as gax.protobuf.Type; + '.google.cloud.dataproc.v1.NodeGroupOperationMetadata' + ) as gax.protobuf.Type; const resizeNodeGroupResponse = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.NodeGroup') as gax.protobuf.Type; + '.google.cloud.dataproc.v1.NodeGroup' + ) as gax.protobuf.Type; const resizeNodeGroupMetadata = protoFilesRoot.lookup( - '.google.cloud.dataproc.v1.NodeGroupOperationMetadata') as gax.protobuf.Type; + '.google.cloud.dataproc.v1.NodeGroupOperationMetadata' + ) as gax.protobuf.Type; this.descriptors.longrunning = { createNodeGroup: new this._gaxModule.LongrunningDescriptor( this.operationsClient, createNodeGroupResponse.decode.bind(createNodeGroupResponse), - createNodeGroupMetadata.decode.bind(createNodeGroupMetadata)), + createNodeGroupMetadata.decode.bind(createNodeGroupMetadata) + ), resizeNodeGroup: new this._gaxModule.LongrunningDescriptor( this.operationsClient, resizeNodeGroupResponse.decode.bind(resizeNodeGroupResponse), - resizeNodeGroupMetadata.decode.bind(resizeNodeGroupMetadata)) + resizeNodeGroupMetadata.decode.bind(resizeNodeGroupMetadata) + ), }; // Put together the default options sent with requests. 
this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.dataproc.v1.NodeGroupController', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + 'google.cloud.dataproc.v1.NodeGroupController', + gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, + {'x-goog-api-client': clientHeader.join(' ')} + ); // Set up a dictionary of "inner API calls"; the core implementation // of calling the API is handled in `google-gax`, with this code @@ -267,32 +401,39 @@ export class NodeGroupControllerClient { // Put together the "service stub" for // google.cloud.dataproc.v1.NodeGroupController. this.nodeGroupControllerStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.dataproc.v1.NodeGroupController') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any + this._opts.fallback + ? (this._protos as protobuf.Root).lookupService( + 'google.cloud.dataproc.v1.NodeGroupController' + ) + : // eslint-disable-next-line @typescript-eslint/no-explicit-any (this._protos as any).google.cloud.dataproc.v1.NodeGroupController, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + this._opts, + this._providedCustomServicePath + ) as Promise<{[method: string]: Function}>; // Iterate over each of the methods that the service provides // and create an API call method for each. 
- const nodeGroupControllerStubMethods = - ['createNodeGroup', 'resizeNodeGroup', 'getNodeGroup']; + const nodeGroupControllerStubMethods = [ + 'createNodeGroup', + 'resizeNodeGroup', + 'getNodeGroup', + ]; for (const methodName of nodeGroupControllerStubMethods) { const callPromise = this.nodeGroupControllerStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { + stub => + (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error | null | undefined) => () => { throw err; - }); + } + ); - const descriptor = - this.descriptors.longrunning[methodName] || - undefined; + const descriptor = this.descriptors.longrunning[methodName] || undefined; const apiCall = this._gaxModule.createApiCall( callPromise, this._defaults[methodName], @@ -337,9 +478,7 @@ export class NodeGroupControllerClient { * @returns {string[]} List of default scopes. */ static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; + return ['https://www.googleapis.com/auth/cloud-platform']; } getProjectId(): Promise; @@ -348,8 +487,9 @@ export class NodeGroupControllerClient { * Return the project ID used by this class. * @returns {Promise} A promise that resolves to string containing the project ID. */ - getProjectId(callback?: Callback): - Promise|void { + getProjectId( + callback?: Callback + ): Promise | void { if (callback) { this.auth.getProjectId(callback); return; @@ -360,320 +500,428 @@ export class NodeGroupControllerClient { // ------------------- // -- Service calls -- // ------------------- -/** - * Gets the resource representation for a node group in a - * cluster. 
- * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. The name of the node group to retrieve. - * Format: - * `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}` - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [NodeGroup]{@link google.cloud.dataproc.v1.NodeGroup}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. - * @example include:samples/generated/v1/node_group_controller.get_node_group.js - * region_tag:dataproc_v1_generated_NodeGroupController_GetNodeGroup_async - */ + /** + * Gets the resource representation for a node group in a + * cluster. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The name of the node group to retrieve. + * Format: + * `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}` + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [NodeGroup]{@link google.cloud.dataproc.v1.NodeGroup}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
+ * @example include:samples/generated/v1/node_group_controller.get_node_group.js + * region_tag:dataproc_v1_generated_NodeGroupController_GetNodeGroup_async + */ getNodeGroup( - request?: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.dataproc.v1.INodeGroup, - protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|undefined, {}|undefined - ]>; + request?: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, + options?: CallOptions + ): Promise< + [ + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest | undefined, + {} | undefined + ] + >; getNodeGroup( - request: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.dataproc.v1.INodeGroup, - protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|null|undefined, - {}|null|undefined>): void; + request: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest | null | undefined, + {} | null | undefined + > + ): void; getNodeGroup( - request: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, - callback: Callback< - protos.google.cloud.dataproc.v1.INodeGroup, - protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|null|undefined, - {}|null|undefined>): void; + request: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, + callback: Callback< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest | null | undefined, + {} | null | undefined + > + ): void; getNodeGroup( - request?: protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.dataproc.v1.INodeGroup, - protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< + request?: 
protos.google.cloud.dataproc.v1.IGetNodeGroupRequest, + optionsOrCallback?: + | CallOptions + | Callback< protos.google.cloud.dataproc.v1.INodeGroup, - protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.dataproc.v1.INodeGroup, - protos.google.cloud.dataproc.v1.IGetNodeGroupRequest|undefined, {}|undefined - ]>|void { + | protos.google.cloud.dataproc.v1.IGetNodeGroupRequest + | null + | undefined, + {} | null | undefined + >, + callback?: Callback< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest | null | undefined, + {} | null | undefined + > + ): Promise< + [ + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.IGetNodeGroupRequest | undefined, + {} | undefined + ] + > | void { request = request || {}; let options: CallOptions; if (typeof optionsOrCallback === 'function' && callback === undefined) { callback = optionsOrCallback; options = {}; - } - else { + } else { options = optionsOrCallback as CallOptions; } options = options || {}; options.otherArgs = options.otherArgs || {}; options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'name': request.name ?? '', - }); + options.otherArgs.headers['x-goog-request-params'] = + this._gaxModule.routingHeader.fromParams({ + name: request.name ?? '', + }); this.initialize(); return this.innerApiCalls.getNodeGroup(request, options, callback); } -/** - * Creates a node group in a cluster. The returned - * {@link google.longrunning.Operation.metadata|Operation.metadata} is - * [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. 
The parent resource where this node group will be created. - * Format: `projects/{project}/regions/{region}/clusters/{cluster}` - * @param {google.cloud.dataproc.v1.NodeGroup} request.nodeGroup - * Required. The node group to create. - * @param {string} [request.nodeGroupId] - * Optional. An optional node group ID. Generated if not specified. - * - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). Cannot begin or end with underscore - * or hyphen. Must consist of from 3 to 33 characters. - * @param {string} [request.requestId] - * Optional. A unique ID used to identify the request. If the server receives - * two - * [CreateNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequests) - * with the same ID, the second request is ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created - * and stored in the backend is returned. - * - * Recommendation: Set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1/node_group_controller.create_node_group.js - * region_tag:dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async - */ + /** + * Creates a node group in a cluster. The returned + * {@link google.longrunning.Operation.metadata|Operation.metadata} is + * [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. The parent resource where this node group will be created. + * Format: `projects/{project}/regions/{region}/clusters/{cluster}` + * @param {google.cloud.dataproc.v1.NodeGroup} request.nodeGroup + * Required. The node group to create. + * @param {string} [request.nodeGroupId] + * Optional. An optional node group ID. Generated if not specified. + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). Cannot begin or end with underscore + * or hyphen. Must consist of from 3 to 33 characters. + * @param {string} [request.requestId] + * Optional. A unique ID used to identify the request. If the server receives + * two + * [CreateNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequests) + * with the same ID, the second request is ignored and the + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. + * + * Recommendation: Set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. 
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/node_group_controller.create_node_group.js + * region_tag:dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async + */ createNodeGroup( - request?: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; + request?: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, + options?: CallOptions + ): Promise< + [ + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | undefined, + {} | undefined + ] + >; createNodeGroup( - request: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; + request: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, + options: CallOptions, + callback: Callback< + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): void; createNodeGroup( - request: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; + request: 
protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, + callback: Callback< + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): void; createNodeGroup( - request?: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { + request?: protos.google.cloud.dataproc.v1.ICreateNodeGroupRequest, + optionsOrCallback?: + | CallOptions + | Callback< + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + >, + callback?: Callback< + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): Promise< + [ + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | undefined, + {} | undefined + ] + > | void { request = request || {}; let options: CallOptions; if (typeof optionsOrCallback === 'function' && callback === undefined) { callback = optionsOrCallback; options = {}; - } - else { + } else { options = optionsOrCallback as CallOptions; } options = options || {}; options.otherArgs = options.otherArgs || {}; options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = 
this._gaxModule.routingHeader.fromParams({ - 'parent': request.parent ?? '', - }); + options.otherArgs.headers['x-goog-request-params'] = + this._gaxModule.routingHeader.fromParams({ + parent: request.parent ?? '', + }); this.initialize(); return this.innerApiCalls.createNodeGroup(request, options, callback); } -/** - * Check the status of the long running operation returned by `createNodeGroup()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/node_group_controller.create_node_group.js - * region_tag:dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async - */ - async checkCreateNodeGroupProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + /** + * Check the status of the long running operation returned by `createNodeGroup()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/node_group_controller.create_node_group.js + * region_tag:dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async + */ + async checkCreateNodeGroupProgress( + name: string + ): Promise< + LROperation< + protos.google.cloud.dataproc.v1.NodeGroup, + protos.google.cloud.dataproc.v1.NodeGroupOperationMetadata + > + > { + const request = + new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest( + {name} + ); const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.createNodeGroup, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; + const decodeOperation = new this._gaxModule.Operation( + operation, + this.descriptors.longrunning.createNodeGroup, + this._gaxModule.createDefaultBackoffSettings() + ); + return decodeOperation as LROperation< + protos.google.cloud.dataproc.v1.NodeGroup, + protos.google.cloud.dataproc.v1.NodeGroupOperationMetadata + >; } -/** - * Resizes a node group in a cluster. The returned - * {@link google.longrunning.Operation.metadata|Operation.metadata} is - * [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. The name of the node group to resize. - * Format: - * `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}` - * @param {number} request.size - * Required. The number of running instances for the node group to maintain. - * The group adds or removes instances to maintain the number of instances - * specified by this parameter. - * @param {string} [request.requestId] - * Optional. A unique ID used to identify the request. 
If the server receives - * two - * [ResizeNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests) - * with the same ID, the second request is ignored and the - * first {@link google.longrunning.Operation|google.longrunning.Operation} created - * and stored in the backend is returned. - * - * Recommendation: Set this value to a - * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). - * - * The ID must contain only letters (a-z, A-Z), numbers (0-9), - * underscores (_), and hyphens (-). The maximum length is 40 characters. - * @param {google.protobuf.Duration} [request.gracefulDecommissionTimeout] - * Optional. Timeout for graceful YARN decommissioning. [Graceful - * decommissioning] - * (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) - * allows the removal of nodes from the Compute Engine node group - * without interrupting jobs in progress. This timeout specifies how long to - * wait for jobs in progress to finish before forcefully removing nodes (and - * potentially interrupting jobs). Default timeout is 0 (for forceful - * decommission), and the maximum allowed timeout is 1 day. (see JSON - * representation of - * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). - * - * Only supported on Dataproc image versions 1.2 and higher. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. 
- * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/node_group_controller.resize_node_group.js - * region_tag:dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async - */ + /** + * Resizes a node group in a cluster. The returned + * {@link google.longrunning.Operation.metadata|Operation.metadata} is + * [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. The name of the node group to resize. + * Format: + * `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}` + * @param {number} request.size + * Required. The number of running instances for the node group to maintain. + * The group adds or removes instances to maintain the number of instances + * specified by this parameter. + * @param {string} [request.requestId] + * Optional. A unique ID used to identify the request. If the server receives + * two + * [ResizeNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests) + * with the same ID, the second request is ignored and the + * first {@link google.longrunning.Operation|google.longrunning.Operation} created + * and stored in the backend is returned. + * + * Recommendation: Set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The ID must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {google.protobuf.Duration} [request.gracefulDecommissionTimeout] + * Optional. Timeout for graceful YARN decommissioning. 
[Graceful + * decommissioning] + * (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) + * allows the removal of nodes from the Compute Engine node group + * without interrupting jobs in progress. This timeout specifies how long to + * wait for jobs in progress to finish before forcefully removing nodes (and + * potentially interrupting jobs). Default timeout is 0 (for forceful + * decommission), and the maximum allowed timeout is 1 day. (see JSON + * representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + * + * Only supported on Dataproc image versions 1.2 and higher. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/node_group_controller.resize_node_group.js + * region_tag:dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async + */ resizeNodeGroup( - request?: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; + request?: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, + options?: CallOptions + ): Promise< + [ + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | undefined, + {} | undefined + ] + >; resizeNodeGroup( - request: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; + request: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, + options: CallOptions, + callback: Callback< + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): void; resizeNodeGroup( - request: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; + request: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, + callback: Callback< + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): void; resizeNodeGroup( - request?: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - 
{}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { + request?: protos.google.cloud.dataproc.v1.IResizeNodeGroupRequest, + optionsOrCallback?: + | CallOptions + | Callback< + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + >, + callback?: Callback< + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): Promise< + [ + LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >, + protos.google.longrunning.IOperation | undefined, + {} | undefined + ] + > | void { request = request || {}; let options: CallOptions; if (typeof optionsOrCallback === 'function' && callback === undefined) { callback = optionsOrCallback; options = {}; - } - else { + } else { options = optionsOrCallback as CallOptions; } options = options || {}; options.otherArgs = options.otherArgs || {}; options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = this._gaxModule.routingHeader.fromParams({ - 'name': request.name ?? '', - }); + options.otherArgs.headers['x-goog-request-params'] = + this._gaxModule.routingHeader.fromParams({ + name: request.name ?? '', + }); this.initialize(); return this.innerApiCalls.resizeNodeGroup(request, options, callback); } -/** - * Check the status of the long running operation returned by `resizeNodeGroup()`. - * @param {String} name - * The operation name that will be passed. 
- * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/node_group_controller.resize_node_group.js - * region_tag:dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async - */ - async checkResizeNodeGroupProgress(name: string): Promise>{ - const request = new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest({name}); + /** + * Check the status of the long running operation returned by `resizeNodeGroup()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/node_group_controller.resize_node_group.js + * region_tag:dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async + */ + async checkResizeNodeGroupProgress( + name: string + ): Promise< + LROperation< + protos.google.cloud.dataproc.v1.NodeGroup, + protos.google.cloud.dataproc.v1.NodeGroupOperationMetadata + > + > { + const request = + new this._gaxModule.operationsProtos.google.longrunning.GetOperationRequest( + {name} + ); const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new this._gaxModule.Operation(operation, this.descriptors.longrunning.resizeNodeGroup, this._gaxModule.createDefaultBackoffSettings()); - return decodeOperation as LROperation; + const decodeOperation = new this._gaxModule.Operation( + operation, + this.descriptors.longrunning.resizeNodeGroup, + this._gaxModule.createDefaultBackoffSettings() + ); + return decodeOperation as LROperation< + protos.google.cloud.dataproc.v1.NodeGroup, + protos.google.cloud.dataproc.v1.NodeGroupOperationMetadata + >; } // -------------------- // -- Path templates -- @@ -687,7 +935,7 @@ export class NodeGroupControllerClient { * @param {string} batch * @returns {string} Resource name string. */ - batchPath(project:string,location:string,batch:string) { + batchPath(project: string, location: string, batch: string) { return this.pathTemplates.batchPathTemplate.render({ project: project, location: location, @@ -736,7 +984,7 @@ export class NodeGroupControllerClient { * @param {string} cluster * @returns {string} Resource name string. */ - clusterRegionPath(project:string,region:string,cluster:string) { + clusterRegionPath(project: string, region: string, cluster: string) { return this.pathTemplates.clusterRegionPathTemplate.render({ project: project, region: region, @@ -752,7 +1000,8 @@ export class NodeGroupControllerClient { * @returns {string} A string representing the project. 
*/ matchProjectFromClusterRegionName(clusterRegionName: string) { - return this.pathTemplates.clusterRegionPathTemplate.match(clusterRegionName).project; + return this.pathTemplates.clusterRegionPathTemplate.match(clusterRegionName) + .project; } /** @@ -763,7 +1012,8 @@ export class NodeGroupControllerClient { * @returns {string} A string representing the region. */ matchRegionFromClusterRegionName(clusterRegionName: string) { - return this.pathTemplates.clusterRegionPathTemplate.match(clusterRegionName).region; + return this.pathTemplates.clusterRegionPathTemplate.match(clusterRegionName) + .region; } /** @@ -774,7 +1024,8 @@ export class NodeGroupControllerClient { * @returns {string} A string representing the cluster. */ matchClusterFromClusterRegionName(clusterRegionName: string) { - return this.pathTemplates.clusterRegionPathTemplate.match(clusterRegionName).cluster; + return this.pathTemplates.clusterRegionPathTemplate.match(clusterRegionName) + .cluster; } /** @@ -786,7 +1037,12 @@ export class NodeGroupControllerClient { * @param {string} node_group * @returns {string} Resource name string. */ - nodeGroupPath(project:string,region:string,cluster:string,nodeGroup:string) { + nodeGroupPath( + project: string, + region: string, + cluster: string, + nodeGroup: string + ) { return this.pathTemplates.nodeGroupPathTemplate.render({ project: project, region: region, @@ -803,7 +1059,8 @@ export class NodeGroupControllerClient { * @returns {string} A string representing the project. */ matchProjectFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).project; + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .project; } /** @@ -825,7 +1082,8 @@ export class NodeGroupControllerClient { * @returns {string} A string representing the cluster. 
*/ matchClusterFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).cluster; + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .cluster; } /** @@ -836,7 +1094,8 @@ export class NodeGroupControllerClient { * @returns {string} A string representing the node_group. */ matchNodeGroupFromNodeGroupName(nodeGroupName: string) { - return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).node_group; + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .node_group; } /** @@ -845,7 +1104,7 @@ export class NodeGroupControllerClient { * @param {string} project * @returns {string} Resource name string. */ - projectPath(project:string) { + projectPath(project: string) { return this.pathTemplates.projectPathTemplate.render({ project: project, }); @@ -870,12 +1129,18 @@ export class NodeGroupControllerClient { * @param {string} autoscaling_policy * @returns {string} Resource name string. */ - projectLocationAutoscalingPolicyPath(project:string,location:string,autoscalingPolicy:string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render({ - project: project, - location: location, - autoscaling_policy: autoscalingPolicy, - }); + projectLocationAutoscalingPolicyPath( + project: string, + location: string, + autoscalingPolicy: string + ) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render( + { + project: project, + location: location, + autoscaling_policy: autoscalingPolicy, + } + ); } /** @@ -885,8 +1150,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_location_autoscaling_policy resource. * @returns {string} A string representing the project. 
*/ - matchProjectFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).project; + matchProjectFromProjectLocationAutoscalingPolicyName( + projectLocationAutoscalingPolicyName: string + ) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match( + projectLocationAutoscalingPolicyName + ).project; } /** @@ -896,8 +1165,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_location_autoscaling_policy resource. * @returns {string} A string representing the location. */ - matchLocationFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).location; + matchLocationFromProjectLocationAutoscalingPolicyName( + projectLocationAutoscalingPolicyName: string + ) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match( + projectLocationAutoscalingPolicyName + ).location; } /** @@ -907,8 +1180,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_location_autoscaling_policy resource. * @returns {string} A string representing the autoscaling_policy. 
*/ - matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName(projectLocationAutoscalingPolicyName: string) { - return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match(projectLocationAutoscalingPolicyName).autoscaling_policy; + matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName( + projectLocationAutoscalingPolicyName: string + ) { + return this.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match( + projectLocationAutoscalingPolicyName + ).autoscaling_policy; } /** @@ -919,12 +1196,18 @@ export class NodeGroupControllerClient { * @param {string} workflow_template * @returns {string} Resource name string. */ - projectLocationWorkflowTemplatePath(project:string,location:string,workflowTemplate:string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render({ - project: project, - location: location, - workflow_template: workflowTemplate, - }); + projectLocationWorkflowTemplatePath( + project: string, + location: string, + workflowTemplate: string + ) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render( + { + project: project, + location: location, + workflow_template: workflowTemplate, + } + ); } /** @@ -934,8 +1217,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_location_workflow_template resource. * @returns {string} A string representing the project. 
*/ - matchProjectFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).project; + matchProjectFromProjectLocationWorkflowTemplateName( + projectLocationWorkflowTemplateName: string + ) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match( + projectLocationWorkflowTemplateName + ).project; } /** @@ -945,8 +1232,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_location_workflow_template resource. * @returns {string} A string representing the location. */ - matchLocationFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).location; + matchLocationFromProjectLocationWorkflowTemplateName( + projectLocationWorkflowTemplateName: string + ) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match( + projectLocationWorkflowTemplateName + ).location; } /** @@ -956,8 +1247,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_location_workflow_template resource. * @returns {string} A string representing the workflow_template. 
*/ - matchWorkflowTemplateFromProjectLocationWorkflowTemplateName(projectLocationWorkflowTemplateName: string) { - return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match(projectLocationWorkflowTemplateName).workflow_template; + matchWorkflowTemplateFromProjectLocationWorkflowTemplateName( + projectLocationWorkflowTemplateName: string + ) { + return this.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match( + projectLocationWorkflowTemplateName + ).workflow_template; } /** @@ -968,12 +1263,18 @@ export class NodeGroupControllerClient { * @param {string} autoscaling_policy * @returns {string} Resource name string. */ - projectRegionAutoscalingPolicyPath(project:string,region:string,autoscalingPolicy:string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render({ - project: project, - region: region, - autoscaling_policy: autoscalingPolicy, - }); + projectRegionAutoscalingPolicyPath( + project: string, + region: string, + autoscalingPolicy: string + ) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render( + { + project: project, + region: region, + autoscaling_policy: autoscalingPolicy, + } + ); } /** @@ -983,8 +1284,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_region_autoscaling_policy resource. * @returns {string} A string representing the project. 
*/ - matchProjectFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).project; + matchProjectFromProjectRegionAutoscalingPolicyName( + projectRegionAutoscalingPolicyName: string + ) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match( + projectRegionAutoscalingPolicyName + ).project; } /** @@ -994,8 +1299,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_region_autoscaling_policy resource. * @returns {string} A string representing the region. */ - matchRegionFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).region; + matchRegionFromProjectRegionAutoscalingPolicyName( + projectRegionAutoscalingPolicyName: string + ) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match( + projectRegionAutoscalingPolicyName + ).region; } /** @@ -1005,8 +1314,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_region_autoscaling_policy resource. * @returns {string} A string representing the autoscaling_policy. */ - matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName(projectRegionAutoscalingPolicyName: string) { - return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match(projectRegionAutoscalingPolicyName).autoscaling_policy; + matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName( + projectRegionAutoscalingPolicyName: string + ) { + return this.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match( + projectRegionAutoscalingPolicyName + ).autoscaling_policy; } /** @@ -1017,7 +1330,11 @@ export class NodeGroupControllerClient { * @param {string} workflow_template * @returns {string} Resource name string. 
*/ - projectRegionWorkflowTemplatePath(project:string,region:string,workflowTemplate:string) { + projectRegionWorkflowTemplatePath( + project: string, + region: string, + workflowTemplate: string + ) { return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render({ project: project, region: region, @@ -1032,8 +1349,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_region_workflow_template resource. * @returns {string} A string representing the project. */ - matchProjectFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).project; + matchProjectFromProjectRegionWorkflowTemplateName( + projectRegionWorkflowTemplateName: string + ) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match( + projectRegionWorkflowTemplateName + ).project; } /** @@ -1043,8 +1364,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_region_workflow_template resource. * @returns {string} A string representing the region. */ - matchRegionFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).region; + matchRegionFromProjectRegionWorkflowTemplateName( + projectRegionWorkflowTemplateName: string + ) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match( + projectRegionWorkflowTemplateName + ).region; } /** @@ -1054,8 +1379,12 @@ export class NodeGroupControllerClient { * A fully-qualified path representing project_region_workflow_template resource. * @returns {string} A string representing the workflow_template. 
*/ - matchWorkflowTemplateFromProjectRegionWorkflowTemplateName(projectRegionWorkflowTemplateName: string) { - return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match(projectRegionWorkflowTemplateName).workflow_template; + matchWorkflowTemplateFromProjectRegionWorkflowTemplateName( + projectRegionWorkflowTemplateName: string + ) { + return this.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match( + projectRegionWorkflowTemplateName + ).workflow_template; } /** @@ -1065,7 +1394,7 @@ export class NodeGroupControllerClient { * @param {string} region * @returns {string} Resource name string. */ - regionPath(project:string,region:string) { + regionPath(project: string, region: string) { return this.pathTemplates.regionPathTemplate.render({ project: project, region: region, diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client_config.json b/packages/google-cloud-dataproc/src/v1/node_group_controller_client_config.json similarity index 100% rename from owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_client_config.json rename to packages/google-cloud-dataproc/src/v1/node_group_controller_client_config.json diff --git a/owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_proto_list.json b/packages/google-cloud-dataproc/src/v1/node_group_controller_proto_list.json similarity index 100% rename from owl-bot-staging/google-cloud-dataproc/v1/src/v1/node_group_controller_proto_list.json rename to packages/google-cloud-dataproc/src/v1/node_group_controller_proto_list.json diff --git a/packages/google-cloud-dataproc/src/v1/workflow_template_service_client.ts b/packages/google-cloud-dataproc/src/v1/workflow_template_service_client.ts index 3dac976244a..f5559be9453 100644 --- a/packages/google-cloud-dataproc/src/v1/workflow_template_service_client.ts +++ b/packages/google-cloud-dataproc/src/v1/workflow_template_service_client.ts @@ -125,6 +125,9 @@ export class 
WorkflowTemplateServiceClient { (typeof window !== 'undefined' && typeof window?.fetch === 'function'); opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + // Request numeric enum values if REST transport is used. + opts.numericEnums = true; + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { opts['scopes'] = staticMembers.scopes; @@ -183,6 +186,9 @@ export class WorkflowTemplateServiceClient { batchPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}/locations/{location}/batches/{batch}' ), + nodeGroupPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}' + ), projectPathTemplate: new this._gaxModule.PathTemplate( 'projects/{project}' ), @@ -1609,6 +1615,76 @@ export class WorkflowTemplateServiceClient { return this.pathTemplates.batchPathTemplate.match(batchName).batch; } + /** + * Return a fully-qualified nodeGroup resource name string. + * + * @param {string} project + * @param {string} region + * @param {string} cluster + * @param {string} node_group + * @returns {string} Resource name string. + */ + nodeGroupPath( + project: string, + region: string, + cluster: string, + nodeGroup: string + ) { + return this.pathTemplates.nodeGroupPathTemplate.render({ + project: project, + region: region, + cluster: cluster, + node_group: nodeGroup, + }); + } + + /** + * Parse the project from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the project. + */ + matchProjectFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .project; + } + + /** + * Parse the region from NodeGroup resource. 
+ * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the region. + */ + matchRegionFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName).region; + } + + /** + * Parse the cluster from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the cluster. + */ + matchClusterFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .cluster; + } + + /** + * Parse the node_group from NodeGroup resource. + * + * @param {string} nodeGroupName + * A fully-qualified path representing NodeGroup resource. + * @returns {string} A string representing the node_group. + */ + matchNodeGroupFromNodeGroupName(nodeGroupName: string) { + return this.pathTemplates.nodeGroupPathTemplate.match(nodeGroupName) + .node_group; + } + /** * Return a fully-qualified project resource name string. 
* diff --git a/packages/google-cloud-dataproc/src/v1/workflow_template_service_proto_list.json b/packages/google-cloud-dataproc/src/v1/workflow_template_service_proto_list.json index b26a9be7c5f..3bb7ccf055a 100644 --- a/packages/google-cloud-dataproc/src/v1/workflow_template_service_proto_list.json +++ b/packages/google-cloud-dataproc/src/v1/workflow_template_service_proto_list.json @@ -3,6 +3,7 @@ "../../protos/google/cloud/dataproc/v1/batches.proto", "../../protos/google/cloud/dataproc/v1/clusters.proto", "../../protos/google/cloud/dataproc/v1/jobs.proto", + "../../protos/google/cloud/dataproc/v1/node_groups.proto", "../../protos/google/cloud/dataproc/v1/operations.proto", "../../protos/google/cloud/dataproc/v1/shared.proto", "../../protos/google/cloud/dataproc/v1/workflow_templates.proto" diff --git a/packages/google-cloud-dataproc/system-test/fixtures/sample/src/index.js b/packages/google-cloud-dataproc/system-test/fixtures/sample/src/index.js index db177f5d891..a7b952bd45e 100644 --- a/packages/google-cloud-dataproc/system-test/fixtures/sample/src/index.js +++ b/packages/google-cloud-dataproc/system-test/fixtures/sample/src/index.js @@ -25,6 +25,7 @@ function main() { const batchControllerClient = new dataproc.BatchControllerClient(); const clusterControllerClient = new dataproc.ClusterControllerClient(); const jobControllerClient = new dataproc.JobControllerClient(); + const nodeGroupControllerClient = new dataproc.NodeGroupControllerClient(); const workflowTemplateServiceClient = new dataproc.WorkflowTemplateServiceClient(); } diff --git a/packages/google-cloud-dataproc/system-test/fixtures/sample/src/index.ts b/packages/google-cloud-dataproc/system-test/fixtures/sample/src/index.ts index b5eb16171cd..d41f66aefb0 100644 --- a/packages/google-cloud-dataproc/system-test/fixtures/sample/src/index.ts +++ b/packages/google-cloud-dataproc/system-test/fixtures/sample/src/index.ts @@ -21,6 +21,7 @@ import { BatchControllerClient, ClusterControllerClient, 
JobControllerClient, + NodeGroupControllerClient, WorkflowTemplateServiceClient, } from '@google-cloud/dataproc'; @@ -39,6 +40,11 @@ function doStuffWithClusterControllerClient(client: ClusterControllerClient) { function doStuffWithJobControllerClient(client: JobControllerClient) { client.close(); } +function doStuffWithNodeGroupControllerClient( + client: NodeGroupControllerClient +) { + client.close(); +} function doStuffWithWorkflowTemplateServiceClient( client: WorkflowTemplateServiceClient ) { @@ -59,6 +65,9 @@ function main() { const jobControllerClient = new JobControllerClient(); doStuffWithJobControllerClient(jobControllerClient); // check that the client instance can be created + const nodeGroupControllerClient = new NodeGroupControllerClient(); + doStuffWithNodeGroupControllerClient(nodeGroupControllerClient); + // check that the client instance can be created const workflowTemplateServiceClient = new WorkflowTemplateServiceClient(); doStuffWithWorkflowTemplateServiceClient(workflowTemplateServiceClient); } diff --git a/packages/google-cloud-dataproc/test/gapic_autoscaling_policy_service_v1.ts b/packages/google-cloud-dataproc/test/gapic_autoscaling_policy_service_v1.ts index 26539241741..055856a2945 100644 --- a/packages/google-cloud-dataproc/test/gapic_autoscaling_policy_service_v1.ts +++ b/packages/google-cloud-dataproc/test/gapic_autoscaling_policy_service_v1.ts @@ -1275,6 +1275,83 @@ describe('v1.AutoscalingPolicyServiceClient', () => { }); }); + describe('nodeGroup', () => { + const fakePath = '/rendered/path/nodeGroup'; + const expectedParameters = { + project: 'projectValue', + region: 'regionValue', + cluster: 'clusterValue', + node_group: 'nodeGroupValue', + }; + const client = + new autoscalingpolicyserviceModule.v1.AutoscalingPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = sinon + .stub() + 
.returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = sinon + .stub() + .returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath( + 'projectValue', + 'regionValue', + 'clusterValue', + 'nodeGroupValue' + ); + assert.strictEqual(result, fakePath); + assert( + (client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, 'regionValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, 'clusterValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, 'nodeGroupValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + describe('project', () => { const fakePath = '/rendered/path/project'; const expectedParameters = { diff --git a/packages/google-cloud-dataproc/test/gapic_batch_controller_v1.ts b/packages/google-cloud-dataproc/test/gapic_batch_controller_v1.ts index 3188b441d0b..e96d87042e3 100644 --- a/packages/google-cloud-dataproc/test/gapic_batch_controller_v1.ts +++ 
b/packages/google-cloud-dataproc/test/gapic_batch_controller_v1.ts @@ -1119,6 +1119,82 @@ describe('v1.BatchControllerClient', () => { }); }); + describe('nodeGroup', () => { + const fakePath = '/rendered/path/nodeGroup'; + const expectedParameters = { + project: 'projectValue', + region: 'regionValue', + cluster: 'clusterValue', + node_group: 'nodeGroupValue', + }; + const client = new batchcontrollerModule.v1.BatchControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = sinon + .stub() + .returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = sinon + .stub() + .returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath( + 'projectValue', + 'regionValue', + 'clusterValue', + 'nodeGroupValue' + ); + assert.strictEqual(result, fakePath); + assert( + (client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, 'regionValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, 'clusterValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = 
client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, 'nodeGroupValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + describe('project', () => { const fakePath = '/rendered/path/project'; const expectedParameters = { diff --git a/packages/google-cloud-dataproc/test/gapic_cluster_controller_v1.ts b/packages/google-cloud-dataproc/test/gapic_cluster_controller_v1.ts index c7ec31b2483..08a5715c302 100644 --- a/packages/google-cloud-dataproc/test/gapic_cluster_controller_v1.ts +++ b/packages/google-cloud-dataproc/test/gapic_cluster_controller_v1.ts @@ -2208,6 +2208,82 @@ describe('v1.ClusterControllerClient', () => { }); }); + describe('nodeGroup', () => { + const fakePath = '/rendered/path/nodeGroup'; + const expectedParameters = { + project: 'projectValue', + region: 'regionValue', + cluster: 'clusterValue', + node_group: 'nodeGroupValue', + }; + const client = new clustercontrollerModule.v1.ClusterControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = sinon + .stub() + .returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = sinon + .stub() + .returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath( + 'projectValue', + 'regionValue', + 'clusterValue', + 'nodeGroupValue' + ); + assert.strictEqual(result, fakePath); + assert( + (client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchRegionFromNodeGroupName', 
() => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, 'regionValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, 'clusterValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, 'nodeGroupValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + describe('projectLocationAutoscalingPolicy', () => { const fakePath = '/rendered/path/projectLocationAutoscalingPolicy'; const expectedParameters = { diff --git a/packages/google-cloud-dataproc/test/gapic_job_controller_v1.ts b/packages/google-cloud-dataproc/test/gapic_job_controller_v1.ts index 89c3bdb8edc..d43e3a1d45e 100644 --- a/packages/google-cloud-dataproc/test/gapic_job_controller_v1.ts +++ b/packages/google-cloud-dataproc/test/gapic_job_controller_v1.ts @@ -1682,6 +1682,82 @@ describe('v1.JobControllerClient', () => { }); }); + describe('nodeGroup', () => { + const fakePath = '/rendered/path/nodeGroup'; + const expectedParameters = { + project: 'projectValue', + region: 'regionValue', + cluster: 'clusterValue', + node_group: 'nodeGroupValue', + }; + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = sinon + .stub() + .returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = sinon + .stub() + .returns(expectedParameters); + + it('nodeGroupPath', () => { + 
const result = client.nodeGroupPath( + 'projectValue', + 'regionValue', + 'clusterValue', + 'nodeGroupValue' + ); + assert.strictEqual(result, fakePath); + assert( + (client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, 'regionValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, 'clusterValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, 'nodeGroupValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + describe('projectLocationAutoscalingPolicy', () => { const fakePath = '/rendered/path/projectLocationAutoscalingPolicy'; const expectedParameters = { diff --git a/packages/google-cloud-dataproc/test/gapic_node_group_controller_v1.ts b/packages/google-cloud-dataproc/test/gapic_node_group_controller_v1.ts new file mode 100644 index 00000000000..37bcdb19c1b --- /dev/null +++ b/packages/google-cloud-dataproc/test/gapic_node_group_controller_v1.ts @@ -0,0 +1,1390 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as nodegroupcontrollerModule from '../src'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON( + require('../protos/protos.json') +).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = ( + instance.constructor as typeof protobuf.Message + ).toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject( + filledObject + ) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error + ? 
sinon.stub().rejects(error) + : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback( + response?: ResponseType, + error?: Error +) { + return error + ? sinon.stub().callsArgWith(2, error) + : sinon.stub().callsArgWith(2, null, response); +} + +function stubLongRunningCall( + response?: ResponseType, + callError?: Error, + lroError?: Error +) { + const innerStub = lroError + ? sinon.stub().rejects(lroError) + : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError + ? sinon.stub().rejects(callError) + : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback( + response?: ResponseType, + callError?: Error, + lroError?: Error +) { + const innerStub = lroError + ? sinon.stub().rejects(lroError) + : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError + ? sinon.stub().callsArgWith(2, callError) + : sinon.stub().callsArgWith(2, null, mockOperation); +} + +describe('v1.NodeGroupControllerClient', () => { + describe('Common methods', () => { + it('has servicePath', () => { + const servicePath = + nodegroupcontrollerModule.v1.NodeGroupControllerClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = + nodegroupcontrollerModule.v1.NodeGroupControllerClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = nodegroupcontrollerModule.v1.NodeGroupControllerClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = + new nodegroupcontrollerModule.v1.NodeGroupControllerClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + fallback: true, + } + ); + assert(client); + }); + + it('has initialize method and supports deferred 
initialization', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + assert.strictEqual(client.nodeGroupControllerStub, undefined); + await client.initialize(); + assert(client.nodeGroupControllerStub); + }); + + it('has close method for the initialized client', done => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + assert(client.nodeGroupControllerStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + assert.strictEqual(client.nodeGroupControllerStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.auth.getProjectId = sinon + .stub() + .callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + 
client.getProjectId((err?: Error | null, projectId?: string | null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getNodeGroup', () => { + it('invokes getNodeGroup without error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.GetNodeGroupRequest', + ['name'] + ); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.NodeGroup() + ); + client.innerApiCalls.getNodeGroup = stubSimpleCall(expectedResponse); + const [response] = await client.getNodeGroup(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = ( + client.innerApiCalls.getNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.getNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getNodeGroup without error using callback', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.GetNodeGroupRequest', + ['name'] 
+ ); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.dataproc.v1.NodeGroup() + ); + client.innerApiCalls.getNodeGroup = + stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getNodeGroup( + request, + ( + err?: Error | null, + result?: protos.google.cloud.dataproc.v1.INodeGroup | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + } + ); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = ( + client.innerApiCalls.getNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.getNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getNodeGroup with error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.GetNodeGroupRequest', + ['name'] + ); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.getNodeGroup = stubSimpleCall( + undefined, + expectedError + ); + await assert.rejects(client.getNodeGroup(request), expectedError); + const actualRequest = ( + client.innerApiCalls.getNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + 
client.innerApiCalls.getNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getNodeGroup with closed client', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.GetNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.GetNodeGroupRequest', + ['name'] + ); + request.name = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getNodeGroup(request), expectedError); + }); + }); + + describe('createNodeGroup', () => { + it('invokes createNodeGroup without error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.CreateNodeGroupRequest', + ['parent'] + ); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.createNodeGroup = + stubLongRunningCall(expectedResponse); + const [operation] = await client.createNodeGroup(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = ( + client.innerApiCalls.createNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + 
const actualHeaderRequestParams = ( + client.innerApiCalls.createNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createNodeGroup without error using callback', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.CreateNodeGroupRequest', + ['parent'] + ); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.createNodeGroup = + stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.createNodeGroup( + request, + ( + err?: Error | null, + result?: LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + > | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + } + ); + }); + const operation = (await promise) as LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = ( + client.innerApiCalls.createNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.createNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + 
assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createNodeGroup with call error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.CreateNodeGroupRequest', + ['parent'] + ); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.createNodeGroup = stubLongRunningCall( + undefined, + expectedError + ); + await assert.rejects(client.createNodeGroup(request), expectedError); + const actualRequest = ( + client.innerApiCalls.createNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.createNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes createNodeGroup with LRO error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.CreateNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.CreateNodeGroupRequest', + ['parent'] + ); + request.parent = defaultValue1; + const expectedHeaderRequestParams = `parent=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.createNodeGroup = stubLongRunningCall( + undefined, + undefined, + 
expectedError + ); + const [operation] = await client.createNodeGroup(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = ( + client.innerApiCalls.createNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.createNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkCreateNodeGroupProgress without error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const expectedResponse = generateSampleMessage( + new operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')}; + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkCreateNodeGroupProgress( + expectedResponse.name + ); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkCreateNodeGroupProgress with error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall( + undefined, + expectedError + ); + await assert.rejects( + client.checkCreateNodeGroupProgress(''), + expectedError + ); + assert((client.operationsClient.getOperation 
as SinonStub).getCall(0)); + }); + }); + + describe('resizeNodeGroup', () => { + it('invokes resizeNodeGroup without error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.ResizeNodeGroupRequest', + ['name'] + ); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.resizeNodeGroup = + stubLongRunningCall(expectedResponse); + const [operation] = await client.resizeNodeGroup(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = ( + client.innerApiCalls.resizeNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.resizeNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes resizeNodeGroup without error using callback', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.ResizeNodeGroupRequest', + ['name'] + ); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const 
expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.resizeNodeGroup = + stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.resizeNodeGroup( + request, + ( + err?: Error | null, + result?: LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + > | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + } + ); + }); + const operation = (await promise) as LROperation< + protos.google.cloud.dataproc.v1.INodeGroup, + protos.google.cloud.dataproc.v1.INodeGroupOperationMetadata + >; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = ( + client.innerApiCalls.resizeNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.resizeNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes resizeNodeGroup with call error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.ResizeNodeGroupRequest', + ['name'] + ); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.resizeNodeGroup = stubLongRunningCall( + undefined, + expectedError + ); + await assert.rejects(client.resizeNodeGroup(request), expectedError); + const 
actualRequest = ( + client.innerApiCalls.resizeNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.resizeNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes resizeNodeGroup with LRO error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.ResizeNodeGroupRequest() + ); + const defaultValue1 = getTypeDefaultValue( + '.google.cloud.dataproc.v1.ResizeNodeGroupRequest', + ['name'] + ); + request.name = defaultValue1; + const expectedHeaderRequestParams = `name=${defaultValue1}`; + const expectedError = new Error('expected'); + client.innerApiCalls.resizeNodeGroup = stubLongRunningCall( + undefined, + undefined, + expectedError + ); + const [operation] = await client.resizeNodeGroup(request); + await assert.rejects(operation.promise(), expectedError); + const actualRequest = ( + client.innerApiCalls.resizeNodeGroup as SinonStub + ).getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = ( + client.innerApiCalls.resizeNodeGroup as SinonStub + ).getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes checkResizeNodeGroupProgress without error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const expectedResponse = generateSampleMessage( + new 
operationsProtos.google.longrunning.Operation() + ); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')}; + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkResizeNodeGroupProgress( + expectedResponse.name + ); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkResizeNodeGroupProgress with error', async () => { + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall( + undefined, + expectedError + ); + await assert.rejects( + client.checkResizeNodeGroupProgress(''), + expectedError + ); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + }); + + describe('Path templates', () => { + describe('batch', () => { + const fakePath = '/rendered/path/batch'; + const expectedParameters = { + project: 'projectValue', + location: 'locationValue', + batch: 'batchValue', + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + client.pathTemplates.batchPathTemplate.render = sinon + .stub() + .returns(fakePath); + client.pathTemplates.batchPathTemplate.match = sinon + .stub() + .returns(expectedParameters); + + it('batchPath', () => { + const result = client.batchPath( + 'projectValue', + 'locationValue', + 'batchValue' + ); + assert.strictEqual(result, fakePath); + assert( + 
(client.pathTemplates.batchPathTemplate.render as SinonStub) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromBatchName', () => { + const result = client.matchProjectFromBatchName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + (client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchLocationFromBatchName', () => { + const result = client.matchLocationFromBatchName(fakePath); + assert.strictEqual(result, 'locationValue'); + assert( + (client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchBatchFromBatchName', () => { + const result = client.matchBatchFromBatchName(fakePath); + assert.strictEqual(result, 'batchValue'); + assert( + (client.pathTemplates.batchPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + + describe('clusterRegion', () => { + const fakePath = '/rendered/path/clusterRegion'; + const expectedParameters = { + project: 'projectValue', + region: 'regionValue', + cluster: 'clusterValue', + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + client.pathTemplates.clusterRegionPathTemplate.render = sinon + .stub() + .returns(fakePath); + client.pathTemplates.clusterRegionPathTemplate.match = sinon + .stub() + .returns(expectedParameters); + + it('clusterRegionPath', () => { + const result = client.clusterRegionPath( + 'projectValue', + 'regionValue', + 'clusterValue' + ); + assert.strictEqual(result, fakePath); + assert( + (client.pathTemplates.clusterRegionPathTemplate.render as SinonStub) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromClusterRegionName', () => { + const result = client.matchProjectFromClusterRegionName(fakePath); + 
assert.strictEqual(result, 'projectValue'); + assert( + (client.pathTemplates.clusterRegionPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchRegionFromClusterRegionName', () => { + const result = client.matchRegionFromClusterRegionName(fakePath); + assert.strictEqual(result, 'regionValue'); + assert( + (client.pathTemplates.clusterRegionPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchClusterFromClusterRegionName', () => { + const result = client.matchClusterFromClusterRegionName(fakePath); + assert.strictEqual(result, 'clusterValue'); + assert( + (client.pathTemplates.clusterRegionPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + + describe('nodeGroup', () => { + const fakePath = '/rendered/path/nodeGroup'; + const expectedParameters = { + project: 'projectValue', + region: 'regionValue', + cluster: 'clusterValue', + node_group: 'nodeGroupValue', + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = sinon + .stub() + .returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = sinon + .stub() + .returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath( + 'projectValue', + 'regionValue', + 'clusterValue', + 'nodeGroupValue' + ); + assert.strictEqual(result, fakePath); + assert( + (client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); 
+ + it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, 'regionValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, 'clusterValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, 'nodeGroupValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + + describe('project', () => { + const fakePath = '/rendered/path/project'; + const expectedParameters = { + project: 'projectValue', + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + client.pathTemplates.projectPathTemplate.render = sinon + .stub() + .returns(fakePath); + client.pathTemplates.projectPathTemplate.match = sinon + .stub() + .returns(expectedParameters); + + it('projectPath', () => { + const result = client.projectPath('projectValue'); + assert.strictEqual(result, fakePath); + assert( + (client.pathTemplates.projectPathTemplate.render as SinonStub) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromProjectName', () => { + const result = client.matchProjectFromProjectName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + (client.pathTemplates.projectPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + + describe('projectLocationAutoscalingPolicy', () => { + const 
fakePath = '/rendered/path/projectLocationAutoscalingPolicy'; + const expectedParameters = { + project: 'projectValue', + location: 'locationValue', + autoscaling_policy: 'autoscalingPolicyValue', + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationAutoscalingPolicyPath', () => { + const result = client.projectLocationAutoscalingPolicyPath( + 'projectValue', + 'locationValue', + 'autoscalingPolicyValue' + ); + assert.strictEqual(result, fakePath); + assert( + ( + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate + .render as SinonStub + ) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromProjectLocationAutoscalingPolicyName', () => { + const result = + client.matchProjectFromProjectLocationAutoscalingPolicyName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + ( + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchLocationFromProjectLocationAutoscalingPolicyName', () => { + const result = + client.matchLocationFromProjectLocationAutoscalingPolicyName( + fakePath + ); + assert.strictEqual(result, 'locationValue'); + assert( + ( + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName', () => { + const result = + client.matchAutoscalingPolicyFromProjectLocationAutoscalingPolicyName( + fakePath + ); + assert.strictEqual(result, 
'autoscalingPolicyValue'); + assert( + ( + client.pathTemplates.projectLocationAutoscalingPolicyPathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + + describe('projectLocationWorkflowTemplate', () => { + const fakePath = '/rendered/path/projectLocationWorkflowTemplate'; + const expectedParameters = { + project: 'projectValue', + location: 'locationValue', + workflow_template: 'workflowTemplateValue', + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectLocationWorkflowTemplatePath', () => { + const result = client.projectLocationWorkflowTemplatePath( + 'projectValue', + 'locationValue', + 'workflowTemplateValue' + ); + assert.strictEqual(result, fakePath); + assert( + ( + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate + .render as SinonStub + ) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromProjectLocationWorkflowTemplateName', () => { + const result = + client.matchProjectFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + ( + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchLocationFromProjectLocationWorkflowTemplateName', () => { + const result = + client.matchLocationFromProjectLocationWorkflowTemplateName(fakePath); + assert.strictEqual(result, 'locationValue'); + assert( + ( + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + 
+ it('matchWorkflowTemplateFromProjectLocationWorkflowTemplateName', () => { + const result = + client.matchWorkflowTemplateFromProjectLocationWorkflowTemplateName( + fakePath + ); + assert.strictEqual(result, 'workflowTemplateValue'); + assert( + ( + client.pathTemplates.projectLocationWorkflowTemplatePathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + + describe('projectRegionAutoscalingPolicy', () => { + const fakePath = '/rendered/path/projectRegionAutoscalingPolicy'; + const expectedParameters = { + project: 'projectValue', + region: 'regionValue', + autoscaling_policy: 'autoscalingPolicyValue', + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionAutoscalingPolicyPath', () => { + const result = client.projectRegionAutoscalingPolicyPath( + 'projectValue', + 'regionValue', + 'autoscalingPolicyValue' + ); + assert.strictEqual(result, fakePath); + assert( + ( + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate + .render as SinonStub + ) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromProjectRegionAutoscalingPolicyName', () => { + const result = + client.matchProjectFromProjectRegionAutoscalingPolicyName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + ( + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchRegionFromProjectRegionAutoscalingPolicyName', () => { + const result = + client.matchRegionFromProjectRegionAutoscalingPolicyName(fakePath); + 
assert.strictEqual(result, 'regionValue'); + assert( + ( + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName', () => { + const result = + client.matchAutoscalingPolicyFromProjectRegionAutoscalingPolicyName( + fakePath + ); + assert.strictEqual(result, 'autoscalingPolicyValue'); + assert( + ( + client.pathTemplates.projectRegionAutoscalingPolicyPathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + + describe('projectRegionWorkflowTemplate', () => { + const fakePath = '/rendered/path/projectRegionWorkflowTemplate'; + const expectedParameters = { + project: 'projectValue', + region: 'regionValue', + workflow_template: 'workflowTemplateValue', + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectRegionWorkflowTemplatePath', () => { + const result = client.projectRegionWorkflowTemplatePath( + 'projectValue', + 'regionValue', + 'workflowTemplateValue' + ); + assert.strictEqual(result, fakePath); + assert( + ( + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate + .render as SinonStub + ) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromProjectRegionWorkflowTemplateName', () => { + const result = + client.matchProjectFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + ( + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate + .match as SinonStub + ) + .getCall(-1) + 
.calledWith(fakePath) + ); + }); + + it('matchRegionFromProjectRegionWorkflowTemplateName', () => { + const result = + client.matchRegionFromProjectRegionWorkflowTemplateName(fakePath); + assert.strictEqual(result, 'regionValue'); + assert( + ( + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchWorkflowTemplateFromProjectRegionWorkflowTemplateName', () => { + const result = + client.matchWorkflowTemplateFromProjectRegionWorkflowTemplateName( + fakePath + ); + assert.strictEqual(result, 'workflowTemplateValue'); + assert( + ( + client.pathTemplates.projectRegionWorkflowTemplatePathTemplate + .match as SinonStub + ) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + + describe('region', () => { + const fakePath = '/rendered/path/region'; + const expectedParameters = { + project: 'projectValue', + region: 'regionValue', + }; + const client = new nodegroupcontrollerModule.v1.NodeGroupControllerClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + client.initialize(); + client.pathTemplates.regionPathTemplate.render = sinon + .stub() + .returns(fakePath); + client.pathTemplates.regionPathTemplate.match = sinon + .stub() + .returns(expectedParameters); + + it('regionPath', () => { + const result = client.regionPath('projectValue', 'regionValue'); + assert.strictEqual(result, fakePath); + assert( + (client.pathTemplates.regionPathTemplate.render as SinonStub) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromRegionName', () => { + const result = client.matchProjectFromRegionName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + (client.pathTemplates.regionPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchRegionFromRegionName', () => { + const result = client.matchRegionFromRegionName(fakePath); + 
assert.strictEqual(result, 'regionValue'); + assert( + (client.pathTemplates.regionPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + }); +}); diff --git a/packages/google-cloud-dataproc/test/gapic_workflow_template_service_v1.ts b/packages/google-cloud-dataproc/test/gapic_workflow_template_service_v1.ts index 263c2e70806..f41a1dca5f9 100644 --- a/packages/google-cloud-dataproc/test/gapic_workflow_template_service_v1.ts +++ b/packages/google-cloud-dataproc/test/gapic_workflow_template_service_v1.ts @@ -1663,6 +1663,83 @@ describe('v1.WorkflowTemplateServiceClient', () => { }); }); + describe('nodeGroup', () => { + const fakePath = '/rendered/path/nodeGroup'; + const expectedParameters = { + project: 'projectValue', + region: 'regionValue', + cluster: 'clusterValue', + node_group: 'nodeGroupValue', + }; + const client = + new workflowtemplateserviceModule.v1.WorkflowTemplateServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.nodeGroupPathTemplate.render = sinon + .stub() + .returns(fakePath); + client.pathTemplates.nodeGroupPathTemplate.match = sinon + .stub() + .returns(expectedParameters); + + it('nodeGroupPath', () => { + const result = client.nodeGroupPath( + 'projectValue', + 'regionValue', + 'clusterValue', + 'nodeGroupValue' + ); + assert.strictEqual(result, fakePath); + assert( + (client.pathTemplates.nodeGroupPathTemplate.render as SinonStub) + .getCall(-1) + .calledWith(expectedParameters) + ); + }); + + it('matchProjectFromNodeGroupName', () => { + const result = client.matchProjectFromNodeGroupName(fakePath); + assert.strictEqual(result, 'projectValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchRegionFromNodeGroupName', () => { + const result = client.matchRegionFromNodeGroupName(fakePath); + assert.strictEqual(result, 
'regionValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchClusterFromNodeGroupName', () => { + const result = client.matchClusterFromNodeGroupName(fakePath); + assert.strictEqual(result, 'clusterValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + + it('matchNodeGroupFromNodeGroupName', () => { + const result = client.matchNodeGroupFromNodeGroupName(fakePath); + assert.strictEqual(result, 'nodeGroupValue'); + assert( + (client.pathTemplates.nodeGroupPathTemplate.match as SinonStub) + .getCall(-1) + .calledWith(fakePath) + ); + }); + }); + describe('project', () => { const fakePath = '/rendered/path/project'; const expectedParameters = {