From 3ff0744b301465bfd4a8371ecf71a65cb6e1ece4 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 24 May 2022 10:01:41 +0000 Subject: [PATCH 1/2] fix: fixes for dynamic routing and streaming descriptors Use gapic-generator-typescript v2.14.5. PiperOrigin-RevId: 450616838 Source-Link: https://github.com/googleapis/googleapis/commit/7a47b72791e0b84d78beca4c2b26bec42ce31572 Source-Link: https://github.com/googleapis/googleapis-gen/commit/42cc6331bae0b99f61b8e01ae15b05211716c4f9 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDJjYzYzMzFiYWUwYjk5ZjYxYjhlMDFhZTE1YjA1MjExNzE2YzRmOSJ9 --- owl-bot-staging/v1/.eslintignore | 7 + owl-bot-staging/v1/.eslintrc.json | 3 + owl-bot-staging/v1/.gitignore | 14 + owl-bot-staging/v1/.jsdoc.js | 55 + owl-bot-staging/v1/.mocharc.js | 33 + owl-bot-staging/v1/.prettierrc.js | 22 + owl-bot-staging/v1/README.md | 1 + owl-bot-staging/v1/linkinator.config.json | 16 + owl-bot-staging/v1/package.json | 65 ++ .../cloud/bigquery/storage/v1/arrow.proto | 64 + .../cloud/bigquery/storage/v1/avro.proto | 41 + .../cloud/bigquery/storage/v1/protobuf.proto | 48 + .../cloud/bigquery/storage/v1/storage.proto | 624 ++++++++++ .../cloud/bigquery/storage/v1/stream.proto | 217 ++++ .../cloud/bigquery/storage/v1/table.proto | 164 +++ .../v1/big_query_read.create_read_session.js | 74 ++ .../generated/v1/big_query_read.read_rows.js | 66 ++ .../v1/big_query_read.split_read_stream.js | 68 ++ .../v1/big_query_write.append_rows.js | 85 ++ ..._query_write.batch_commit_write_streams.js | 64 + .../v1/big_query_write.create_write_stream.js | 64 + .../big_query_write.finalize_write_stream.js | 59 + .../v1/big_query_write.flush_rows.js | 63 + .../v1/big_query_write.get_write_stream.js | 59 + ...data.google.cloud.bigquery.storage.v1.json | 415 +++++++ owl-bot-staging/v1/src/index.ts | 27 + .../v1/src/v1/big_query_read_client.ts | 816 +++++++++++++ .../src/v1/big_query_read_client_config.json | 44 + .../v1/src/v1/big_query_read_proto_list.json | 8 + .../v1/src/v1/big_query_write_client.ts | 1028 +++++++++++++++++ .../src/v1/big_query_write_client_config.json | 59 + .../v1/src/v1/big_query_write_proto_list.json | 8 + owl-bot-staging/v1/src/v1/gapic_metadata.json | 117 ++ owl-bot-staging/v1/src/v1/index.ts | 20 + .../system-test/fixtures/sample/src/index.js | 28 + .../system-test/fixtures/sample/src/index.ts | 38 + owl-bot-staging/v1/system-test/install.ts | 49 + .../v1/test/gapic_big_query_read_v1.ts | 669 +++++++++++ .../v1/test/gapic_big_query_write_v1.ts | 921 +++++++++++++++ owl-bot-staging/v1/tsconfig.json | 19 + owl-bot-staging/v1/webpack.config.js | 64 + owl-bot-staging/v1beta1/.eslintignore | 7 + owl-bot-staging/v1beta1/.eslintrc.json | 3 + owl-bot-staging/v1beta1/.gitignore | 14 + owl-bot-staging/v1beta1/.jsdoc.js | 55 + owl-bot-staging/v1beta1/.mocharc.js | 33 + owl-bot-staging/v1beta1/.prettierrc.js | 22 + owl-bot-staging/v1beta1/README.md | 1 + .../v1beta1/linkinator.config.json | 16 + owl-bot-staging/v1beta1/package.json | 64 + .../bigquery/storage/v1beta1/arrow.proto | 36 + .../cloud/bigquery/storage/v1beta1/avro.proto | 37 + .../storage/v1beta1/read_options.proto | 39 + .../bigquery/storage/v1beta1/storage.proto | 405 +++++++ .../storage/v1beta1/table_reference.proto | 42 + ...orage.batch_create_read_session_streams.js | 66 ++ .../big_query_storage.create_read_session.js | 91 ++ .../big_query_storage.finalize_stream.js | 58 + .../v1beta1/big_query_storage.read_rows.js | 62 + .../big_query_storage.split_read_stream.js | 68 ++ ...google.cloud.bigquery.storage.v1beta1.json | 247 
++++ owl-bot-staging/v1beta1/src/index.ts | 25 + .../src/v1beta1/big_query_storage_client.ts | 852 ++++++++++++++ .../big_query_storage_client_config.json | 54 + .../v1beta1/big_query_storage_proto_list.json | 7 + .../v1beta1/src/v1beta1/gapic_metadata.json | 68 ++ owl-bot-staging/v1beta1/src/v1beta1/index.ts | 19 + .../system-test/fixtures/sample/src/index.js | 27 + .../system-test/fixtures/sample/src/index.ts | 32 + .../v1beta1/system-test/install.ts | 49 + .../test/gapic_big_query_storage_v1beta1.ts | 781 +++++++++++++ owl-bot-staging/v1beta1/tsconfig.json | 19 + owl-bot-staging/v1beta1/webpack.config.js | 64 + 73 files changed, 9669 insertions(+) create mode 100644 owl-bot-staging/v1/.eslintignore create mode 100644 owl-bot-staging/v1/.eslintrc.json create mode 100644 owl-bot-staging/v1/.gitignore create mode 100644 owl-bot-staging/v1/.jsdoc.js create mode 100644 owl-bot-staging/v1/.mocharc.js create mode 100644 owl-bot-staging/v1/.prettierrc.js create mode 100644 owl-bot-staging/v1/README.md create mode 100644 owl-bot-staging/v1/linkinator.config.json create mode 100644 owl-bot-staging/v1/package.json create mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/arrow.proto create mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/avro.proto create mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/protobuf.proto create mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/storage.proto create mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/stream.proto create mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/table.proto create mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_read.create_read_session.js create mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_read.read_rows.js create mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_read.split_read_stream.js create mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.append_rows.js create mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.batch_commit_write_streams.js create mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.create_write_stream.js create mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.finalize_write_stream.js create mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.flush_rows.js create mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.get_write_stream.js create mode 100644 owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json create mode 100644 owl-bot-staging/v1/src/index.ts create mode 100644 owl-bot-staging/v1/src/v1/big_query_read_client.ts create mode 100644 owl-bot-staging/v1/src/v1/big_query_read_client_config.json create mode 100644 owl-bot-staging/v1/src/v1/big_query_read_proto_list.json create mode 100644 owl-bot-staging/v1/src/v1/big_query_write_client.ts create mode 100644 owl-bot-staging/v1/src/v1/big_query_write_client_config.json create mode 100644 owl-bot-staging/v1/src/v1/big_query_write_proto_list.json create mode 100644 owl-bot-staging/v1/src/v1/gapic_metadata.json create mode 100644 owl-bot-staging/v1/src/v1/index.ts create mode 100644 owl-bot-staging/v1/system-test/fixtures/sample/src/index.js create mode 100644 owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts create mode 100644 owl-bot-staging/v1/system-test/install.ts create mode 100644 owl-bot-staging/v1/test/gapic_big_query_read_v1.ts 
create mode 100644 owl-bot-staging/v1/test/gapic_big_query_write_v1.ts create mode 100644 owl-bot-staging/v1/tsconfig.json create mode 100644 owl-bot-staging/v1/webpack.config.js create mode 100644 owl-bot-staging/v1beta1/.eslintignore create mode 100644 owl-bot-staging/v1beta1/.eslintrc.json create mode 100644 owl-bot-staging/v1beta1/.gitignore create mode 100644 owl-bot-staging/v1beta1/.jsdoc.js create mode 100644 owl-bot-staging/v1beta1/.mocharc.js create mode 100644 owl-bot-staging/v1beta1/.prettierrc.js create mode 100644 owl-bot-staging/v1beta1/README.md create mode 100644 owl-bot-staging/v1beta1/linkinator.config.json create mode 100644 owl-bot-staging/v1beta1/package.json create mode 100644 owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto create mode 100644 owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/avro.proto create mode 100644 owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto create mode 100644 owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/storage.proto create mode 100644 owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto create mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js create mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.create_read_session.js create mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.finalize_stream.js create mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.read_rows.js create mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.split_read_stream.js create mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json create mode 100644 owl-bot-staging/v1beta1/src/index.ts create mode 100644 owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client.ts create mode 100644 owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client_config.json create mode 100644 owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_proto_list.json create mode 100644 owl-bot-staging/v1beta1/src/v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/src/v1beta1/index.ts create mode 100644 owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.js create mode 100644 owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.ts create mode 100644 owl-bot-staging/v1beta1/system-test/install.ts create mode 100644 owl-bot-staging/v1beta1/test/gapic_big_query_storage_v1beta1.ts create mode 100644 owl-bot-staging/v1beta1/tsconfig.json create mode 100644 owl-bot-staging/v1beta1/webpack.config.js diff --git a/owl-bot-staging/v1/.eslintignore b/owl-bot-staging/v1/.eslintignore new file mode 100644 index 00000000..cfc348ec --- /dev/null +++ b/owl-bot-staging/v1/.eslintignore @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/owl-bot-staging/v1/.eslintrc.json b/owl-bot-staging/v1/.eslintrc.json new file mode 100644 index 00000000..78215349 --- /dev/null +++ b/owl-bot-staging/v1/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/owl-bot-staging/v1/.gitignore b/owl-bot-staging/v1/.gitignore new file mode 100644 index 00000000..5d32b237 --- /dev/null +++ b/owl-bot-staging/v1/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output 
+docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/owl-bot-staging/v1/.jsdoc.js b/owl-bot-staging/v1/.jsdoc.js new file mode 100644 index 00000000..21870f2a --- /dev/null +++ b/owl-bot-staging/v1/.jsdoc.js @@ -0,0 +1,55 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2022 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/bigquery-storage', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/owl-bot-staging/v1/.mocharc.js b/owl-bot-staging/v1/.mocharc.js new file mode 100644 index 00000000..481c522b --- /dev/null +++ b/owl-bot-staging/v1/.mocharc.js @@ -0,0 +1,33 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/owl-bot-staging/v1/.prettierrc.js b/owl-bot-staging/v1/.prettierrc.js new file mode 100644 index 00000000..494e1478 --- /dev/null +++ b/owl-bot-staging/v1/.prettierrc.js @@ -0,0 +1,22 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/owl-bot-staging/v1/README.md b/owl-bot-staging/v1/README.md new file mode 100644 index 00000000..f5dcfbaf --- /dev/null +++ b/owl-bot-staging/v1/README.md @@ -0,0 +1 @@ +Storage: Nodejs Client diff --git a/owl-bot-staging/v1/linkinator.config.json b/owl-bot-staging/v1/linkinator.config.json new file mode 100644 index 00000000..befd23c8 --- /dev/null +++ b/owl-bot-staging/v1/linkinator.config.json @@ -0,0 +1,16 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" + ], + "silent": true, + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 +} diff --git a/owl-bot-staging/v1/package.json b/owl-bot-staging/v1/package.json new file mode 100644 index 00000000..240c0964 --- /dev/null +++ b/owl-bot-staging/v1/package.json @@ -0,0 +1,65 @@ +{ + "name": "@google-cloud/bigquery-storage", + "version": "0.1.0", + "description": "Storage client for Node.js", + "repository": "googleapis/nodejs-storage", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google storage", + "storage", + "big query read", + "big query write" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . 
&& cp -r protos build/",
+ "compile-protos": "compileProtos src",
+ "docs": "jsdoc -c .jsdoc.js",
+ "predocs-test": "npm run docs",
+ "docs-test": "linkinator docs",
+ "fix": "gts fix",
+ "lint": "gts check",
+ "prepare": "npm run compile-protos && npm run compile",
+ "system-test": "c8 mocha build/system-test",
+ "test": "c8 mocha build/test"
+ },
+ "dependencies": {
+ "google-gax": "^2.29.4"
+ },
+ "devDependencies": {
+ "@types/mocha": "^9.1.0",
+ "@types/node": "^16.0.0",
+ "@types/sinon": "^10.0.8",
+ "c8": "^7.11.0",
+ "gts": "^3.1.0",
+ "jsdoc": "^3.6.7",
+ "jsdoc-fresh": "^1.1.1",
+ "jsdoc-region-tag": "^1.3.1",
+ "linkinator": "^3.0.0",
+ "mocha": "^9.1.4",
+ "null-loader": "^4.0.1",
+ "pack-n-play": "^1.0.0-2",
+ "sinon": "^13.0.0",
+ "ts-loader": "^9.2.6",
+ "typescript": "^4.5.5",
+ "webpack": "^5.67.0",
+ "webpack-cli": "^4.9.1"
+ },
+ "engines": {
+ "node": ">=v10.24.0"
+ }
+}
diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/arrow.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/arrow.proto
new file mode 100644
index 00000000..6d3f6080
--- /dev/null
+++ b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/arrow.proto
@@ -0,0 +1,64 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1;
+
+option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage";
+option java_multiple_files = true;
+option java_outer_classname = "ArrowProto";
+option java_package = "com.google.cloud.bigquery.storage.v1";
+option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1";
+
+// Arrow schema as specified in
+// https://arrow.apache.org/docs/python/api/datatypes.html
+// and serialized to bytes using IPC:
+// https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc
+//
+// See code samples on how this message can be deserialized.
+message ArrowSchema {
+ // IPC serialized Arrow schema.
+ bytes serialized_schema = 1;
+}
+
+// Arrow RecordBatch.
+message ArrowRecordBatch {
+ // IPC-serialized Arrow RecordBatch.
+ bytes serialized_record_batch = 1;
+
+ // [Deprecated] The count of rows in `serialized_record_batch`.
+ // Please use the format-independent ReadRowsResponse.row_count instead.
+ int64 row_count = 2 [deprecated = true];
+}
+
+// Contains options specific to Arrow Serialization.
+message ArrowSerializationOptions {
+ // Compression codecs supported by Arrow.
+ enum CompressionCodec {
+ // If unspecified, no compression will be used.
+ COMPRESSION_UNSPECIFIED = 0;
+
+ // LZ4 Frame (https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md)
+ LZ4_FRAME = 1;
+
+ // Zstandard compression.
+ ZSTD = 2;
+ }
+
+ // The compression codec to use for Arrow buffers in serialized record
+ // batches.
+ CompressionCodec buffer_compression = 2; +} diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/avro.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/avro.proto new file mode 100644 index 00000000..15de2db5 --- /dev/null +++ b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/avro.proto @@ -0,0 +1,41 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; +option java_multiple_files = true; +option java_outer_classname = "AvroProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; + +// Avro schema. +message AvroSchema { + // Json serialized schema, as described at + // https://avro.apache.org/docs/1.8.1/spec.html. + string schema = 1; +} + +// Avro rows. +message AvroRows { + // Binary serialized rows in a block. + bytes serialized_binary_rows = 1; + + // [Deprecated] The count of rows in the returning block. + // Please use the format-independent ReadRowsResponse.row_count instead. + int64 row_count = 2 [deprecated = true]; +} diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/protobuf.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/protobuf.proto new file mode 100644 index 00000000..b3754acf --- /dev/null +++ b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/protobuf.proto @@ -0,0 +1,48 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +import "google/protobuf/descriptor.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; +option java_multiple_files = true; +option java_outer_classname = "ProtoBufProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; + +// ProtoSchema describes the schema of the serialized protocol buffer data rows. +message ProtoSchema { + // Descriptor for input message. The provided descriptor must be self + // contained, such that data rows sent can be fully decoded using only the + // single descriptor. 
For data rows that are compositions of multiple + // independent messages, this means the descriptor may need to be transformed + // to only use nested types: + // https://developers.google.com/protocol-buffers/docs/proto#nested + // + // For additional information for how proto types and values map onto BigQuery + // see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions + google.protobuf.DescriptorProto proto_descriptor = 1; +} + +message ProtoRows { + // A sequence of rows serialized as a Protocol Buffer. + // + // See https://developers.google.com/protocol-buffers/docs/overview for more + // information on deserializing this field. + repeated bytes serialized_rows = 1; +} diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/storage.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/storage.proto new file mode 100644 index 00000000..67c6c8a0 --- /dev/null +++ b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/storage.proto @@ -0,0 +1,624 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/storage/v1/arrow.proto"; +import "google/cloud/bigquery/storage/v1/avro.proto"; +import "google/cloud/bigquery/storage/v1/protobuf.proto"; +import "google/cloud/bigquery/storage/v1/stream.proto"; +import "google/cloud/bigquery/storage/v1/table.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; +option java_multiple_files = true; +option java_outer_classname = "StorageProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; +option (google.api.resource_definition) = { + type: "bigquery.googleapis.com/Table" + pattern: "projects/{project}/datasets/{dataset}/tables/{table}" +}; + +// BigQuery Read API. +// +// The Read API can be used to read data from BigQuery. +service BigQueryRead { + option (google.api.default_host) = "bigquerystorage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a new read session. A read session divides the contents of a + // BigQuery table into one or more streams, which can then be used to read + // data from the table. The read session also specifies properties of the + // data to be read, such as a list of columns or a push-down filter describing + // the rows to be returned. + // + // A particular row can be read by at most one stream. 
When the caller has
+ // reached the end of each stream in the session, then all the data in the
+ // table has been read.
+ //
+ // Data is assigned to each stream such that roughly the same number of
+ // rows can be read from each stream. Because the server-side unit for
+ // assigning data is collections of rows, the API does not guarantee that
+ // each stream will return the same number of rows. Additionally, the
+ // limits are enforced based on the number of pre-filtered rows, so some
+ // filters can lead to lopsided assignments.
+ //
+ // Read sessions automatically expire 6 hours after they are created and do
+ // not require manual clean-up by the caller.
+ rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) {
+ option (google.api.http) = {
+ post: "/v1/{read_session.table=projects/*/datasets/*/tables/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "parent,read_session,max_stream_count";
+ }
+
+ // Reads rows from the stream in the format prescribed by the ReadSession.
+ // Each response contains one or more table rows, up to a maximum of 100 MiB
+ // per response; read requests which attempt to read individual rows larger
+ // than 100 MiB will fail.
+ //
+ // Each request also returns a set of stream statistics reflecting the current
+ // state of the stream.
+ rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{read_stream=projects/*/locations/*/sessions/*/streams/*}"
+ };
+ option (google.api.method_signature) = "read_stream,offset";
+ }
+
+ // Splits a given `ReadStream` into two `ReadStream` objects. These
+ // `ReadStream` objects are referred to as the primary and the residual
+ // streams of the split. The original `ReadStream` can still be read from in
+ // the same manner as before. Both of the returned `ReadStream` objects can
+ // also be read from, and the rows returned by both child streams will be
+ // the same as the rows read from the original stream.
+ //
+ // Moreover, the two child streams will be allocated back-to-back in the
+ // original `ReadStream`. Concretely, it is guaranteed that for streams
+ // original, primary, and residual, that original[0-j] = primary[0-j] and
+ // original[j-n] = residual[0-m] once the streams have been read to
+ // completion.
+ rpc SplitReadStream(SplitReadStreamRequest) returns (SplitReadStreamResponse) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/locations/*/sessions/*/streams/*}"
+ };
+ }
+}
+
+// BigQuery Write API.
+//
+// The Write API can be used to write data to BigQuery.
+//
+// For supplementary information about the Write API, see:
+// https://cloud.google.com/bigquery/docs/write-api
+service BigQueryWrite {
+ option (google.api.default_host) = "bigquerystorage.googleapis.com";
+ option (google.api.oauth_scopes) =
+ "https://www.googleapis.com/auth/bigquery,"
+ "https://www.googleapis.com/auth/bigquery.insertdata,"
+ "https://www.googleapis.com/auth/cloud-platform";
+
+ // Creates a write stream to the given table.
+ // Additionally, every table has a special stream named '_default'
+ // to which data can be written. This stream doesn't need to be created using
+ // CreateWriteStream. It is a stream that can be used simultaneously by any
+ // number of clients. Data written to this stream is considered committed as
+ // soon as an acknowledgement is received.
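Before the write RPC definitions below, the read flow documented above (create a session, then drain each of its streams) is worth making concrete. This is a minimal sketch, assuming the staged library is consumed as `@google-cloud/bigquery-storage` and exports `BigQueryReadClient` from its root as `src/index.ts` suggests; all resource names are placeholders.

```ts
import {BigQueryReadClient} from '@google-cloud/bigquery-storage';

async function readWholeTable(): Promise<void> {
  const client = new BigQueryReadClient();

  // One session per table scan; the server assigns the streams.
  const [session] = await client.createReadSession({
    parent: 'projects/my-project',
    readSession: {
      table: 'projects/my-project/datasets/my_dataset/tables/my_table',
      dataFormat: 'AVRO',
    },
    maxStreamCount: 2,
  });

  // A row is assigned to at most one stream, so reading every stream
  // to completion reads the whole table.
  for (const stream of session.streams ?? []) {
    const rows = client.readRows({readStream: stream.name, offset: 0});
    for await (const response of rows) {
      console.log(`block of ${response.rowCount} rows`);
    }
  }
}
```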
+ rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/datasets/*/tables/*}"
+ body: "write_stream"
+ };
+ option (google.api.method_signature) = "parent,write_stream";
+ }
+
+ // Appends data to the given stream.
+ //
+ // If `offset` is specified, the `offset` is checked against the end of
+ // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+ // attempt is made to append to an offset beyond the current end of the stream
+ // or `ALREADY_EXISTS` if user provides an `offset` that has already been
+ // written to. User can retry with adjusted offset within the same RPC
+ // connection. If `offset` is not specified, append happens at the end of the
+ // stream.
+ //
+ // The response contains an optional offset at which the append
+ // happened. No offset information will be returned for appends to a
+ // default stream.
+ //
+ // Responses are received in the same order in which requests are sent.
+ // There will be one response for each successfully inserted request. Responses
+ // may optionally embed error information if the originating AppendRequest was
+ // not successfully processed.
+ //
+ // The specifics of when successfully appended data is made visible to the
+ // table are governed by the type of stream:
+ //
+ // * For COMMITTED streams (which includes the default stream), data is
+ // visible immediately upon successful append.
+ //
+ // * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
+ // rpc which advances a cursor to a newer offset in the stream.
+ //
+ // * For PENDING streams, data is not made visible until the stream itself is
+ // finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+ // committed via the `BatchCommitWriteStreams` rpc.
+ //
+ // Note: For users coding against the gRPC API directly, it may be
+ // necessary to supply the x-goog-request-params system parameter
+ // with `write_stream=<full WriteStream name>`.
+ //
+ // More information about system parameters:
+ // https://cloud.google.com/apis/docs/system-parameters
+ rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) {
+ option (google.api.http) = {
+ post: "/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "write_stream";
+ }
+
+ // Gets information about a write stream.
+ rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) {
+ option (google.api.http) = {
+ post: "/v1/{name=projects/*/datasets/*/tables/*/streams/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Finalize a write stream so that no new data can be appended to the
+ // stream. Finalize is not supported on the '_default' stream.
+ rpc FinalizeWriteStream(FinalizeWriteStreamRequest) returns (FinalizeWriteStreamResponse) {
+ option (google.api.http) = {
+ post: "/v1/{name=projects/*/datasets/*/tables/*/streams/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Atomically commits a group of `PENDING` streams that belong to the same
+ // `parent` table.
+ //
+ // Streams must be finalized before commit and cannot be committed multiple
+ // times. Once a stream is committed, data in the stream becomes available
+ // for read operations.
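The write RPCs documented above compose into the `PENDING`-stream lifecycle: create, append, finalize, commit. A hedged sketch against the generated client follows; the resource names are placeholders, and the proto descriptor and pre-encoded rows are assumed to come from the application.

```ts
import {BigQueryWriteClient, protos} from '@google-cloud/bigquery-storage';

type DescriptorProto = protos.google.protobuf.IDescriptorProto;

async function writePendingRows(
  protoDescriptor: DescriptorProto, // describes the application's row message
  serializedRows: Uint8Array[] // rows pre-encoded by the application
): Promise<void> {
  const client = new BigQueryWriteClient();
  const parent = 'projects/my-project/datasets/my_dataset/tables/my_table';

  // Rows in a PENDING stream stay invisible until the commit below.
  const [writeStream] = await client.createWriteStream({
    parent,
    writeStream: {type: 'PENDING'},
  });

  // Bidirectional append; the writer schema and stream name only need
  // to accompany the first request on the connection.
  const connection = client.appendRows();
  connection.write({
    writeStream: writeStream.name,
    protoRows: {writerSchema: {protoDescriptor}, rows: {serializedRows}},
  });
  connection.end();

  // Finalize so no further appends are possible, then commit atomically.
  await client.finalizeWriteStream({name: writeStream.name});
  await client.batchCommitWriteStreams({
    parent,
    writeStreams: [writeStream.name!],
  });
}
```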
+ rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest) returns (BatchCommitWriteStreamsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/datasets/*/tables/*}"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Flushes rows to a BUFFERED stream.
+ //
+ // If users are appending rows to a BUFFERED stream, a flush operation is
+ // required in order for the rows to become available for reading. A
+ // Flush operation flushes up to any previously flushed offset in a BUFFERED
+ // stream, to the offset specified in the request.
+ //
+ // Flush is not supported on the _default stream, since it is not BUFFERED.
+ rpc FlushRows(FlushRowsRequest) returns (FlushRowsResponse) {
+ option (google.api.http) = {
+ post: "/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "write_stream";
+ }
+}
+
+// Request message for `CreateReadSession`.
+message CreateReadSessionRequest {
+ // Required. The request project that owns the session, in the form of
+ // `projects/{project_id}`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "cloudresourcemanager.googleapis.com/Project"
+ }
+ ];
+
+ // Required. Session to be created.
+ ReadSession read_session = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Max initial number of streams. If unset or zero, the server will
+ // provide a value of streams so as to produce reasonable throughput. Must be
+ // non-negative. The number of streams may be lower than the requested number,
+ // depending on the amount of parallelism that is reasonable for the table. An error
+ // will be returned if the max count is greater than the current system
+ // max limit of 1,000.
+ //
+ // Streams must be read starting from offset 0.
+ int32 max_stream_count = 3;
+}
+
+// Request message for `ReadRows`.
+message ReadRowsRequest {
+ // Required. Stream to read rows from.
+ string read_stream = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "bigquerystorage.googleapis.com/ReadStream"
+ }
+ ];
+
+ // The offset requested must be less than the last row read from Read.
+ // Requesting a larger offset is undefined. If not specified, start reading
+ // from offset zero.
+ int64 offset = 2;
+}
+
+// Information on whether the current connection is being throttled.
+message ThrottleState {
+ // How much this connection is being throttled. Zero means no throttling,
+ // 100 means fully throttled.
+ int32 throttle_percent = 1;
+}
+
+// Estimated stream statistics for a given read Stream.
+message StreamStats {
+ message Progress {
+ // The fraction of rows assigned to the stream that have been processed by
+ // the server so far, not including the rows in the current response
+ // message.
+ //
+ // This value, along with `at_response_end`, can be used to interpolate
+ // the progress made as the rows in the message are being processed using
+ // the following formula: `at_response_start + (at_response_end -
+ // at_response_start) * rows_processed_from_response / rows_in_response`.
+ //
+ // Note that if a filter is provided, the `at_response_end` value of the
+ // previous response may not necessarily be equal to the
+ // `at_response_start` value of the current response.
+ double at_response_start = 1;
+
+ // Similar to `at_response_start`, except that this value includes the
+ // rows in the current response.
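The interpolation formula above translates directly into code; a purely illustrative helper, with names mirroring the proto fields:

```ts
// Fraction of the stream processed after consuming `rowsProcessed` of
// the `rowsInResponse` rows in the current ReadRowsResponse, per the
// formula documented on StreamStats.Progress.
function interpolateProgress(
  atResponseStart: number,
  atResponseEnd: number,
  rowsProcessed: number,
  rowsInResponse: number
): number {
  return (
    atResponseStart +
    ((atResponseEnd - atResponseStart) * rowsProcessed) / rowsInResponse
  );
}
```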
+ double at_response_end = 2;
+ }
+
+ // Represents the progress of the current stream.
+ Progress progress = 2;
+}
+
+// Response from calling `ReadRows` may include row data, progress and
+// throttling information.
+message ReadRowsResponse {
+ // Row data is returned in the format specified during session creation.
+ oneof rows {
+ // Serialized row data in AVRO format.
+ AvroRows avro_rows = 3;
+
+ // Serialized row data in Arrow RecordBatch format.
+ ArrowRecordBatch arrow_record_batch = 4;
+ }
+
+ // Number of serialized rows in the rows block.
+ int64 row_count = 6;
+
+ // Statistics for the stream.
+ StreamStats stats = 2;
+
+ // Throttling state. If unset, the latest response still describes
+ // the current throttling status.
+ ThrottleState throttle_state = 5;
+
+ // The schema for the read. If read_options.selected_fields is set, the
+ // schema may be different from the table schema as it will only contain
+ // the selected fields. This schema is equivalent to the one returned by
+ // CreateSession. This field is only populated in the first ReadRowsResponse
+ // RPC.
+ oneof schema {
+ // Output only. Avro schema.
+ AvroSchema avro_schema = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Arrow schema.
+ ArrowSchema arrow_schema = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+ }
+}
+
+// Request message for `SplitReadStream`.
+message SplitReadStreamRequest {
+ // Required. Name of the stream to split.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "bigquerystorage.googleapis.com/ReadStream"
+ }
+ ];
+
+ // A value in the range (0.0, 1.0) that specifies the fractional point at
+ // which the original stream should be split. The actual split point is
+ // evaluated on pre-filtered rows, so if a filter is provided, then there is
+ // no guarantee that the division of the rows between the new child streams
+ // will be proportional to this fractional value. Additionally, because the
+ // server-side unit for assigning data is collections of rows, this fraction
+ // will always map to a data storage boundary on the server side.
+ double fraction = 2;
+}
+
+// Response message for `SplitReadStream`.
+message SplitReadStreamResponse {
+ // Primary stream, which contains the beginning portion of
+ // |original_stream|. An empty value indicates that the original stream can no
+ // longer be split.
+ ReadStream primary_stream = 1;
+
+ // Remainder stream, which contains the tail of |original_stream|. An empty
+ // value indicates that the original stream can no longer be split.
+ ReadStream remainder_stream = 2;
+}
+
+// Request message for `CreateWriteStream`.
+message CreateWriteStreamRequest {
+ // Required. Reference to the table to which the stream belongs, in the format
+ // of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "bigquery.googleapis.com/Table"
+ }
+ ];
+
+ // Required. Stream to be created.
+ WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request message for `AppendRows`.
+//
+// Due to the nature of AppendRows being a bidirectional streaming RPC, certain
+// parts of the AppendRowsRequest need only be specified for the first request
+// sent each time the gRPC network connection is opened/reopened.
+message AppendRowsRequest {
+ // ProtoData contains the data rows and schema when constructing append
+ // requests.
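Returning briefly to `ReadRowsResponse` above: both the schema and the row payload arrive as oneofs, so a consumer branches on whichever field is populated. A sketch, assuming the generated `protos` type namespace is exported alongside the clients:

```ts
import type {protos} from '@google-cloud/bigquery-storage';

type ReadRowsResponse =
  protos.google.cloud.bigquery.storage.v1.IReadRowsResponse;

function inspectResponse(response: ReadRowsResponse): void {
  // The schema oneof is only populated on the first response of a read.
  if (response.avroSchema?.schema) {
    console.log('Avro schema JSON:', response.avroSchema.schema);
  } else if (response.arrowSchema?.serializedSchema) {
    console.log('Arrow schema bytes:', response.arrowSchema.serializedSchema.length);
  }

  // The rows oneof matches the session's data_format.
  if (response.avroRows?.serializedBinaryRows) {
    console.log('Avro block bytes:', response.avroRows.serializedBinaryRows.length);
  } else if (response.arrowRecordBatch?.serializedRecordBatch) {
    console.log('Arrow batch bytes:', response.arrowRecordBatch.serializedRecordBatch.length);
  }
}
```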
+ message ProtoData {
+ // Proto schema used to serialize the data. This value only needs to be
+ // provided as part of the first request on a gRPC network connection,
+ // and will be ignored for subsequent requests on the connection.
+ ProtoSchema writer_schema = 1;
+
+ // Serialized row data in protobuf message format.
+ // Currently, the backend expects the serialized rows to adhere to
+ // proto2 semantics when appending rows, particularly with respect to
+ // how default values are encoded.
+ ProtoRows rows = 2;
+ }
+
+ // Required. The write_stream identifies the target of the append operation, and only
+ // needs to be specified as part of the first request on the gRPC connection.
+ // If provided for subsequent requests, it must match the value of the first
+ // request.
+ //
+ // For explicitly created write streams, the format is:
+ //
+ // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+ //
+ // For the special default stream, the format is:
+ //
+ // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+ string write_stream = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "bigquerystorage.googleapis.com/WriteStream"
+ }
+ ];
+
+ // If present, the write is only performed if the next append offset is the same
+ // as the provided value. If not present, the write is performed at the
+ // current end of stream. Specifying a value for this field is not allowed
+ // when calling AppendRows for the '_default' stream.
+ google.protobuf.Int64Value offset = 2;
+
+ // Input rows. The `writer_schema` field must be specified at the initial
+ // request and currently, it will be ignored if specified in following
+ // requests. Following requests must have data in the same format as the
+ // initial request.
+ oneof rows {
+ // Rows in proto format.
+ ProtoData proto_rows = 4;
+ }
+
+ // ID set by the client to annotate its identity. Only initial request setting is
+ // respected.
+ string trace_id = 6;
+}
+
+// Response message for `AppendRows`.
+message AppendRowsResponse {
+ // AppendResult is returned for successful append requests.
+ message AppendResult {
+ // The row offset at which the last append occurred. The offset will not be
+ // set if appending using default streams.
+ google.protobuf.Int64Value offset = 1;
+ }
+
+ oneof response {
+ // Result if the append is successful.
+ AppendResult append_result = 1;
+
+ // Error returned when problems were encountered. If present,
+ // it indicates rows were not accepted into the system.
+ // Users can retry or continue with other append requests within the
+ // same connection.
+ //
+ // Additional information about error signalling:
+ //
+ // ALREADY_EXISTS: Happens when an append specified an offset, and the
+ // backend already has received data at this offset. Typically encountered
+ // in retry scenarios, and can be ignored.
+ //
+ // OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+ // the current end of the stream.
+ //
+ // INVALID_ARGUMENT: Indicates a malformed request or data.
+ //
+ // ABORTED: Request processing is aborted because of prior failures. The
+ // request can be retried if the previous failure is addressed.
+ //
+ // INTERNAL: Indicates server side error(s) that can be retried.
+ google.rpc.Status error = 2;
+ }
+
+ // If the backend detects a schema update, it is passed to the user so that
+ // the user can use it to input new types of messages. It will be empty when no schema
+ // updates have occurred.
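A listener that follows these response semantics — append offsets, per-request errors, and schema updates — might look like the sketch below, consuming the duplex stream's events; the types are assumed from the exported `protos` namespace:

```ts
import {BigQueryWriteClient, protos} from '@google-cloud/bigquery-storage';

type AppendRowsResponse =
  protos.google.cloud.bigquery.storage.v1.IAppendRowsResponse;

const client = new BigQueryWriteClient();
const connection = client.appendRows();

connection.on('data', (response: AppendRowsResponse) => {
  if (response.appendResult) {
    // The offset is unset when appending to the '_default' stream.
    console.log('appended at offset', response.appendResult.offset?.value);
  } else if (response.error) {
    // ALREADY_EXISTS on a retried offset is typically ignorable; other
    // codes may be retried or surfaced, per the comment above.
    console.error('append failed:', response.error.code, response.error.message);
  }
  if (response.updatedSchema) {
    // Adapt subsequent rows to the new table schema.
    console.log('table schema now has', response.updatedSchema.fields?.length, 'fields');
  }
});
connection.on('error', err => console.error('stream error:', err));
```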
+ TableSchema updated_schema = 3;
+}
+
+// Request message for `GetWriteStream`.
+message GetWriteStreamRequest {
+ // Required. Name of the stream to get, in the form of
+ // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "bigquerystorage.googleapis.com/WriteStream"
+ }
+ ];
+}
+
+// Request message for `BatchCommitWriteStreams`.
+message BatchCommitWriteStreamsRequest {
+ // Required. Parent table that all the streams should belong to, in the form of
+ // `projects/{project}/datasets/{dataset}/tables/{table}`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "bigquery.googleapis.com/Table"
+ }
+ ];
+
+ // Required. The group of streams that will be committed atomically.
+ repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Response message for `BatchCommitWriteStreams`.
+message BatchCommitWriteStreamsResponse {
+ // The time at which streams were committed, with microsecond granularity.
+ // This field will only exist when there are no stream errors.
+ // **Note:** if this field is not set, it means the commit was not successful.
+ google.protobuf.Timestamp commit_time = 1;
+
+ // Stream level error if commit failed. Only streams with error will be in
+ // the list.
+ // If empty, there is no error and all streams are committed successfully.
+ // If non-empty, certain streams have errors and no streams are committed due
+ // to the atomicity guarantee.
+ repeated StorageError stream_errors = 2;
+}
+
+// Request message for invoking `FinalizeWriteStream`.
+message FinalizeWriteStreamRequest {
+ // Required. Name of the stream to finalize, in the form of
+ // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "bigquerystorage.googleapis.com/WriteStream"
+ }
+ ];
+}
+
+// Response message for `FinalizeWriteStream`.
+message FinalizeWriteStreamResponse {
+ // Number of rows in the finalized stream.
+ int64 row_count = 1;
+}
+
+// Request message for `FlushRows`.
+message FlushRowsRequest {
+ // Required. The stream that is the target of the flush operation.
+ string write_stream = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "bigquerystorage.googleapis.com/WriteStream"
+ }
+ ];
+
+ // Ending offset of the flush operation. Rows before this offset (including
+ // this offset) will be flushed.
+ google.protobuf.Int64Value offset = 2;
+}
+
+// Response message for `FlushRows`.
+message FlushRowsResponse {
+ // The rows before this offset (including this offset) are flushed.
+ int64 offset = 1;
+}
+
+// Structured custom BigQuery Storage error message. The error can be attached
+// as error details in the returned rpc Status. In particular, the use of error
+// codes allows more structured error handling, and reduces the need to evaluate
+// unstructured error text strings.
+message StorageError {
+ // Error code for `StorageError`.
+ enum StorageErrorCode {
+ // Default error.
+ STORAGE_ERROR_CODE_UNSPECIFIED = 0;
+
+ // Table is not found in the system.
+ TABLE_NOT_FOUND = 1;
+
+ // Stream is already committed.
+ STREAM_ALREADY_COMMITTED = 2;
+
+ // Stream is not found.
+ STREAM_NOT_FOUND = 3;
+
+ // Invalid Stream type.
+ // For example, you try to commit a stream that is not pending.
+ INVALID_STREAM_TYPE = 4;
+
+ // Invalid Stream state.
+ // For example, you try to commit a stream that is not finalized or has been
+ // garbage collected.
+ INVALID_STREAM_STATE = 5;
+
+ // Stream is finalized.
+ STREAM_FINALIZED = 6;
+
+ // There is a schema mismatch, caused by the user schema having an extra
+ // field that is not present in the BigQuery schema.
+ SCHEMA_MISMATCH_EXTRA_FIELDS = 7;
+
+ // Offset already exists.
+ OFFSET_ALREADY_EXISTS = 8;
+
+ // Offset out of range.
+ OFFSET_OUT_OF_RANGE = 9;
+ }
+
+ // BigQuery Storage specific error code.
+ StorageErrorCode code = 1;
+
+ // Name of the failed entity.
+ string entity = 2;
+
+ // Message that describes the error.
+ string error_message = 3;
+}
diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/stream.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/stream.proto
new file mode 100644
index 00000000..bd1fa2ce
--- /dev/null
+++ b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/stream.proto
@@ -0,0 +1,217 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.storage.v1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/bigquery/storage/v1/arrow.proto";
+import "google/cloud/bigquery/storage/v1/avro.proto";
+import "google/cloud/bigquery/storage/v1/table.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1";
+option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage";
+option java_multiple_files = true;
+option java_outer_classname = "StreamProto";
+option java_package = "com.google.cloud.bigquery.storage.v1";
+option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1";
+
+// Data format for input or output data.
+enum DataFormat {
+ DATA_FORMAT_UNSPECIFIED = 0;
+
+ // Avro is a standard open source row-based file format.
+ // See https://avro.apache.org/ for more details.
+ AVRO = 1;
+
+ // Arrow is a standard open source column-based message format.
+ // See https://arrow.apache.org/ for more details.
+ ARROW = 2;
+}
+
+// Information about the ReadSession.
+message ReadSession {
+ option (google.api.resource) = {
+ type: "bigquerystorage.googleapis.com/ReadSession"
+ pattern: "projects/{project}/locations/{location}/sessions/{session}"
+ };
+
+ // Additional attributes when reading a table.
+ message TableModifiers {
+ // The snapshot time of the table. If not set, interpreted as now.
+ google.protobuf.Timestamp snapshot_time = 1;
+ }
+
+ // Options dictating how we read a table.
+ message TableReadOptions {
+ // Names of the fields in the table that should be read. If empty, all
+ // fields will be read. If the specified field is a nested field, all
+ // the sub-fields in the field will be selected. The output field order is
+ // unrelated to the order of fields in selected_fields.
+ repeated string selected_fields = 1;
+
+ // SQL text filtering statement, similar to a WHERE clause in a query.
+ // Aggregates are not supported.
+ //
+ // Examples: "int_field > 5"
+ // "date_field = CAST('2014-9-27' as DATE)"
+ // "nullable_field is not NULL"
+ // "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+ // "numeric_field BETWEEN 1.0 AND 5.0"
+ //
+ // Restricted to a maximum length of 1 MB.
+ string row_restriction = 2;
+
+ oneof output_format_serialization_options {
+ // Optional. Options specific to the Apache Arrow output format.
+ ArrowSerializationOptions arrow_serialization_options = 3 [(google.api.field_behavior) = OPTIONAL];
+ }
+ }
+
+ // Output only. Unique identifier for the session, in the form
+ // `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+ string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Time at which the session becomes invalid. After this time, subsequent
+ // requests to read this Session will return errors. The expire_time is
+ // automatically assigned and currently cannot be specified or updated.
+ google.protobuf.Timestamp expire_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Immutable. Data format of the output data.
+ DataFormat data_format = 3 [(google.api.field_behavior) = IMMUTABLE];
+
+ // The schema for the read. If read_options.selected_fields is set, the
+ // schema may be different from the table schema as it will only contain
+ // the selected fields.
+ oneof schema {
+ // Output only. Avro schema.
+ AvroSchema avro_schema = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Arrow schema.
+ ArrowSchema arrow_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+ }
+
+ // Immutable. Table that this ReadSession is reading from, in the form
+ // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+ string table = 6 [
+ (google.api.field_behavior) = IMMUTABLE,
+ (google.api.resource_reference) = {
+ type: "bigquery.googleapis.com/Table"
+ }
+ ];
+
+ // Optional. Any modifiers which are applied when reading from the specified table.
+ TableModifiers table_modifiers = 7 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Read options for this session (e.g. column selection, filters).
+ TableReadOptions read_options = 8 [(google.api.field_behavior) = OPTIONAL];
+
+ // Output only. A list of streams created with the session.
+ //
+ // At least one stream is created with the session. In the future, larger
+ // request_stream_count values *may* result in this list being unpopulated;
+ // in that case, the user will need to use a List method to get the streams
+ // instead, which is not yet available.
+ repeated ReadStream streams = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. An estimate on the number of bytes this session will scan when
+ // all streams are completely consumed. This estimate is based on
+ // metadata from the table which might be incomplete or stale.
+ int64 estimated_total_bytes_scanned = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Optional. ID set by client to annotate a session identity. This does not need
+ // to be strictly unique, but instead the same ID should be used to group
+ // logically connected sessions (e.g., using the same ID for all sessions
+ // needed to complete a Spark SQL query is reasonable).
+ //
+ // Maximum length is 256 bytes.
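The session options documented above — column projection, row restriction, Arrow buffer compression, snapshot time, and trace ID — combine in a single `CreateReadSession` request. A hedged sketch with placeholder values:

```ts
import {BigQueryReadClient} from '@google-cloud/bigquery-storage';

async function createFilteredSession() {
  const client = new BigQueryReadClient();
  const [session] = await client.createReadSession({
    parent: 'projects/my-project',
    readSession: {
      table: 'projects/my-project/datasets/my_dataset/tables/my_table',
      dataFormat: 'ARROW',
      readOptions: {
        selectedFields: ['int_field', 'date_field'],
        rowRestriction: 'int_field > 5',
        arrowSerializationOptions: {bufferCompression: 'LZ4_FRAME'},
      },
      // Read the table as of "now"; omit to get the same default.
      tableModifiers: {snapshotTime: {seconds: Math.floor(Date.now() / 1000)}},
      traceId: 'my-spark-sql-query', // groups related sessions; <= 256 bytes
    },
    maxStreamCount: 4,
  });
  return session;
}
```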
+ string trace_id = 13 [(google.api.field_behavior) = OPTIONAL]; +} + +// Information about a single stream that gets data out of the storage system. +// Most of the information about `ReadStream` instances is aggregated, making +// `ReadStream` lightweight. +message ReadStream { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/ReadStream" + pattern: "projects/{project}/locations/{location}/sessions/{session}/streams/{stream}" + }; + + // Output only. Name of the stream, in the form + // `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Information about a single stream that gets data inside the storage system. +message WriteStream { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/WriteStream" + pattern: "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}" + }; + + // Type enum of the stream. + enum Type { + // Unknown type. + TYPE_UNSPECIFIED = 0; + + // Data will commit automatically and appear as soon as the write is + // acknowledged. + COMMITTED = 1; + + // Data is invisible until the stream is committed. + PENDING = 2; + + // Data is only visible up to the offset to which it was flushed. + BUFFERED = 3; + } + + // Mode enum of the stream. + enum WriteMode { + // Unknown type. + WRITE_MODE_UNSPECIFIED = 0; + + // Insert new records into the table. + // It is the default value if customers do not specify it. + INSERT = 1; + } + + // Output only. Name of the stream, in the form + // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Immutable. Type of the stream. + Type type = 2 [(google.api.field_behavior) = IMMUTABLE]; + + // Output only. Create time of the stream. For the _default stream, this is the + // creation_time of the table. + google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Commit time of the stream. + // If a stream is of `COMMITTED` type, then it will have a commit_time same as + // `create_time`. If the stream is of `PENDING` type, empty commit_time + // means it is not committed. + google.protobuf.Timestamp commit_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The schema of the destination table. It is only returned in + // `CreateWriteStream` response. Caller should generate data that's + // compatible with this schema to send in initial `AppendRowsRequest`. + // The table schema could go out of date during the life time of the stream. + TableSchema table_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Immutable. Mode of the stream. + WriteMode write_mode = 7 [(google.api.field_behavior) = IMMUTABLE]; +} diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/table.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/table.proto new file mode 100644 index 00000000..545f6292 --- /dev/null +++ b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/table.proto @@ -0,0 +1,164 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1; + +import "google/api/field_behavior.proto"; + +option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; +option java_multiple_files = true; +option java_outer_classname = "TableProto"; +option java_package = "com.google.cloud.bigquery.storage.v1"; +option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; + +// Schema of a table. +message TableSchema { + // Describes the fields in a table. + repeated TableFieldSchema fields = 1; +} + +// TableFieldSchema defines a single field/column within a table schema. +message TableFieldSchema { + enum Type { + // Illegal value + TYPE_UNSPECIFIED = 0; + + // 64K, UTF8 + STRING = 1; + + // 64-bit signed + INT64 = 2; + + // 64-bit IEEE floating point + DOUBLE = 3; + + // Aggregate type + STRUCT = 4; + + // 64K, Binary + BYTES = 5; + + // 2-valued + BOOL = 6; + + // 64-bit signed usec since UTC epoch + TIMESTAMP = 7; + + // Civil date - Year, Month, Day + DATE = 8; + + // Civil time - Hour, Minute, Second, Microseconds + TIME = 9; + + // Combination of civil date and civil time + DATETIME = 10; + + // Geography object + GEOGRAPHY = 11; + + // Numeric value + NUMERIC = 12; + + // BigNumeric value + BIGNUMERIC = 13; + + // Interval + INTERVAL = 14; + + // JSON, String + JSON = 15; + } + + enum Mode { + // Illegal value + MODE_UNSPECIFIED = 0; + + NULLABLE = 1; + + REQUIRED = 2; + + REPEATED = 3; + } + + // Required. The field name. The name must contain only letters (a-z, A-Z), + // numbers (0-9), or underscores (_), and must start with a letter or + // underscore. The maximum length is 128 characters. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The field data type. + Type type = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The field mode. The default value is NULLABLE. + Mode mode = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Describes the nested schema fields if the type property is set to STRUCT. + repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The field description. The maximum length is 1,024 characters. + string description = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Maximum length of values of this field for STRINGS or BYTES. + // + // If max_length is not specified, no maximum length constraint is imposed + // on this field. + // + // If type = "STRING", then max_length represents the maximum UTF-8 + // length of strings in this field. + // + // If type = "BYTES", then max_length represents the maximum number of + // bytes in this field. + // + // It is invalid to set this field if type is not "STRING" or "BYTES". + int64 max_length = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Precision (maximum number of total digits in base 10) and scale + // (maximum number of digits in the fractional part in base 10) constraints + // for values of this field for NUMERIC or BIGNUMERIC. 
+  //
+  // It is invalid to set precision or scale if type is not "NUMERIC" or
+  // "BIGNUMERIC".
+  //
+  // If precision and scale are not specified, no value range constraint is
+  // imposed on this field insofar as values are permitted by the type.
+  //
+  // Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+  //
+  // * Precision (P) and scale (S) are specified:
+  //   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+  // * Precision (P) is specified but not scale (and thus scale is
+  //   interpreted to be equal to zero):
+  //   [-10^P + 1, 10^P - 1].
+  //
+  // Acceptable values for precision and scale if both are specified:
+  //
+  // * If type = "NUMERIC":
+  //   1 <= precision - scale <= 29 and 0 <= scale <= 9.
+  // * If type = "BIGNUMERIC":
+  //   1 <= precision - scale <= 38 and 0 <= scale <= 38.
+  //
+  // Acceptable values for precision if only precision is specified but not
+  // scale (and thus scale is interpreted to be equal to zero):
+  //
+  // * If type = "NUMERIC": 1 <= precision <= 29.
+  // * If type = "BIGNUMERIC": 1 <= precision <= 38.
+  //
+  // If scale is specified but not precision, then it is invalid.
+  int64 precision = 8 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. See documentation for precision.
+  int64 scale = 9 [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_read.create_read_session.js b/owl-bot-staging/v1/samples/generated/v1/big_query_read.create_read_session.js
new file mode 100644
index 00000000..f4d2f950
--- /dev/null
+++ b/owl-bot-staging/v1/samples/generated/v1/big_query_read.create_read_session.js
@@ -0,0 +1,74 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+
+
+'use strict';
+
+function main(parent, readSession) {
+  // [START bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async]
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   */
+  /**
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   */
+  // const parent = 'abc123'
+  /**
+   * Required. Session to be created.
+   */
+  // const readSession = {}
+  /**
+   * Max initial number of streams. If unset or zero, the server will
+   * provide a value of streams so as to produce reasonable throughput. Must be
+   * non-negative. The number of streams may be lower than the requested number,
+   * depending on the amount of parallelism that is reasonable for the table.
+   * Error will be returned if the max count is greater than the current system
+   * max limit of 1,000.
+   * Streams must be read starting from offset 0.
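To make the request shape concrete, here is a hedged sketch (separate from the generated sample, with placeholder project, table, and column names) of how the `TableReadOptions` and `max_stream_count` fields documented above might be populated together:

```js
// Hypothetical usage sketch; the table path, columns, and filter are made up.
const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1;

async function createFilteredSession() {
  const client = new BigQueryReadClient();
  const [session] = await client.createReadSession({
    parent: 'projects/my-project',
    readSession: {
      table: 'projects/my-project/datasets/my_dataset/tables/my_table',
      dataFormat: 'AVRO',
      readOptions: {
        selectedFields: ['int_field', 'date_field'], // column projection
        rowRestriction: 'int_field > 5',             // push-down filter
      },
    },
    maxStreamCount: 4, // upper bound only; the server may create fewer streams
  });
  console.log(`session ${session.name}: ${session.streams.length} stream(s)`);
}
```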
+ */ + // const maxStreamCount = 1234 + + // Imports the Storage library + const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1; + + // Instantiates a client + const storageClient = new BigQueryReadClient(); + + async function callCreateReadSession() { + // Construct request + const request = { + parent, + readSession, + }; + + // Run request + const response = await storageClient.createReadSession(request); + console.log(response); + } + + callCreateReadSession(); + // [END bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_read.read_rows.js b/owl-bot-staging/v1/samples/generated/v1/big_query_read.read_rows.js new file mode 100644 index 00000000..f5f781a4 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated/v1/big_query_read.read_rows.js @@ -0,0 +1,66 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(readStream) { + // [START bigquerystorage_v1_generated_BigQueryRead_ReadRows_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Stream to read rows from. + */ + // const readStream = 'abc123' + /** + * The offset requested must be less than the last row read from Read. + * Requesting a larger offset is undefined. If not specified, start reading + * from offset zero. 
+ */ + // const offset = 1234 + + // Imports the Storage library + const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1; + + // Instantiates a client + const storageClient = new BigQueryReadClient(); + + async function callReadRows() { + // Construct request + const request = { + readStream, + }; + + // Run request + const stream = await storageClient.readRows(request); + stream.on('data', (response) => { console.log(response) }); + stream.on('error', (err) => { throw(err) }); + stream.on('end', () => { /* API call completed */ }); + } + + callReadRows(); + // [END bigquerystorage_v1_generated_BigQueryRead_ReadRows_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_read.split_read_stream.js b/owl-bot-staging/v1/samples/generated/v1/big_query_read.split_read_stream.js new file mode 100644 index 00000000..6e146955 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated/v1/big_query_read.split_read_stream.js @@ -0,0 +1,68 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(name) { + // [START bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Name of the stream to split. + */ + // const name = 'abc123' + /** + * A value in the range (0.0, 1.0) that specifies the fractional point at + * which the original stream should be split. The actual split point is + * evaluated on pre-filtered rows, so if a filter is provided, then there is + * no guarantee that the division of the rows between the new child streams + * will be proportional to this fractional value. Additionally, because the + * server-side unit for assigning data is collections of rows, this fraction + * will always map to a data storage boundary on the server side. 
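The `fraction` parameter described above pairs naturally with reading both child streams afterwards. A speculative sketch, assuming the caller already holds a stream name from an earlier session (`primaryStream`/`remainderStream` and `rowCount` are field names from the v1 response messages):

```js
// Speculative sketch: split a stream at the midpoint, then drain both halves.
const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1;

async function splitAndRead(streamName) {
  const client = new BigQueryReadClient();
  const [split] = await client.splitReadStream({name: streamName, fraction: 0.5});
  // primaryStream and remainderStream are the two children described above.
  for (const child of [split.primaryStream, split.remainderStream]) {
    await new Promise((resolve, reject) => {
      client.readRows({readStream: child.name})
        .on('data', r => console.log(`${child.name}: ${r.rowCount} rows`))
        .on('error', reject)
        .on('end', resolve);
    });
  }
}
```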
+ */ + // const fraction = 1234 + + // Imports the Storage library + const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1; + + // Instantiates a client + const storageClient = new BigQueryReadClient(); + + async function callSplitReadStream() { + // Construct request + const request = { + name, + }; + + // Run request + const response = await storageClient.splitReadStream(request); + console.log(response); + } + + callSplitReadStream(); + // [END bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.append_rows.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.append_rows.js new file mode 100644 index 00000000..9cefbb22 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated/v1/big_query_write.append_rows.js @@ -0,0 +1,85 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(writeStream) { + // [START bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. The write_stream identifies the target of the append operation, and only + * needs to be specified as part of the first request on the gRPC connection. + * If provided for subsequent requests, it must match the value of the first + * request. + * For explicitly created write streams, the format is: + * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}` + * For the special default stream, the format is: + * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`. + */ + // const writeStream = 'abc123' + /** + * If present, the write is only performed if the next append offset is same + * as the provided value. If not present, the write is performed at the + * current end of stream. Specifying a value for this field is not allowed + * when calling AppendRows for the '_default' stream. + */ + // const offset = {} + /** + * Rows in proto format. + */ + // const protoRows = {} + /** + * Id set by client to annotate its identity. Only initial request setting is + * respected. 
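For the first request on an AppendRows connection, the fields described above combine roughly as follows. This is a sketch rather than the generated sample; `descriptorProto` (a `google.protobuf.DescriptorProto` for one row message) and `encodedRows` (an array of Buffers, one serialized row each) are assumed to be built elsewhere, for example with protobufjs:

```js
// Hedged sketch of a first AppendRows request; descriptorProto and
// encodedRows are assumed inputs (see the lead-in above).
const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1;

async function appendToDefaultStream(descriptorProto, encodedRows) {
  const client = new BigQueryWriteClient();
  const stream = await client.appendRows();
  stream.on('data', response => console.log(response.appendResult));
  stream.on('error', err => { throw err; });

  // Only the first request needs writeStream and the writer schema.
  stream.write({
    writeStream:
      'projects/my-project/datasets/my_dataset/tables/my_table/streams/_default',
    protoRows: {
      writerSchema: {protoDescriptor: descriptorProto},
      rows: {serializedRows: encodedRows},
    },
    traceId: 'my-app', // optional annotation; only the first value is used
  });
  stream.end();
}
```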
+ */ + // const traceId = 'abc123' + + // Imports the Storage library + const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1; + + // Instantiates a client + const storageClient = new BigQueryWriteClient(); + + async function callAppendRows() { + // Construct request + const request = { + writeStream, + }; + + // Run request + const stream = await storageClient.appendRows(); + stream.on('data', (response) => { console.log(response) }); + stream.on('error', (err) => { throw(err) }); + stream.on('end', () => { /* API call completed */ }); + stream.write(request); + stream.end(); + } + + callAppendRows(); + // [END bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.batch_commit_write_streams.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.batch_commit_write_streams.js new file mode 100644 index 00000000..9c258a30 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated/v1/big_query_write.batch_commit_write_streams.js @@ -0,0 +1,64 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(parent, writeStreams) { + // [START bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Parent table that all the streams should belong to, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + */ + // const parent = 'abc123' + /** + * Required. The group of streams that will be committed atomically. 
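Taken together with the CreateWriteStream, AppendRows, and FinalizeWriteStream samples in this set, the `PENDING` workflow that ends in this call might be sketched as follows (placeholder parent path; the append step is elided):

```js
// Speculative end-to-end sketch of the PENDING write workflow.
const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1;

async function commitPendingData(parent) {
  const client = new BigQueryWriteClient();
  // 1. Data written to a PENDING stream stays invisible until commit.
  const [writeStream] = await client.createWriteStream({
    parent, // e.g. 'projects/{project}/datasets/{dataset}/tables/{table}'
    writeStream: {type: 'PENDING'},
  });
  // 2. ...append rows with appendRows(), as in the AppendRows sample...
  // 3. Finalizing blocks any further appends to the stream.
  await client.finalizeWriteStream({name: writeStream.name});
  // 4. Commit atomically; per-stream failures surface in streamErrors.
  const [response] = await client.batchCommitWriteStreams({
    parent,
    writeStreams: [writeStream.name],
  });
  console.log(response.commitTime, response.streamErrors);
}
```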
+ */ + // const writeStreams = 'abc123' + + // Imports the Storage library + const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1; + + // Instantiates a client + const storageClient = new BigQueryWriteClient(); + + async function callBatchCommitWriteStreams() { + // Construct request + const request = { + parent, + writeStreams, + }; + + // Run request + const response = await storageClient.batchCommitWriteStreams(request); + console.log(response); + } + + callBatchCommitWriteStreams(); + // [END bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.create_write_stream.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.create_write_stream.js new file mode 100644 index 00000000..839a8bf6 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated/v1/big_query_write.create_write_stream.js @@ -0,0 +1,64 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(parent, writeStream) { + // [START bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Reference to the table to which the stream belongs, in the format + * of `projects/{project}/datasets/{dataset}/tables/{table}`. + */ + // const parent = 'abc123' + /** + * Required. Stream to be created. + */ + // const writeStream = {} + + // Imports the Storage library + const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1; + + // Instantiates a client + const storageClient = new BigQueryWriteClient(); + + async function callCreateWriteStream() { + // Construct request + const request = { + parent, + writeStream, + }; + + // Run request + const response = await storageClient.createWriteStream(request); + console.log(response); + } + + callCreateWriteStream(); + // [END bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.finalize_write_stream.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.finalize_write_stream.js new file mode 100644 index 00000000..3ff3da7a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated/v1/big_query_write.finalize_write_stream.js @@ -0,0 +1,59 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+
+
+'use strict';
+
+function main(name) {
+  // [START bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async]
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   */
+  /**
+   * Required. Name of the stream to finalize, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+   */
+  // const name = 'abc123'
+
+  // Imports the Storage library
+  const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1;
+
+  // Instantiates a client
+  const storageClient = new BigQueryWriteClient();
+
+  async function callFinalizeWriteStream() {
+    // Construct request
+    const request = {
+      name,
+    };
+
+    // Run request
+    const response = await storageClient.finalizeWriteStream(request);
+    console.log(response);
+  }
+
+  callFinalizeWriteStream();
+  // [END bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async]
+}
+
+process.on('unhandledRejection', err => {
+  console.error(err.message);
+  process.exitCode = 1;
+});
+main(...process.argv.slice(2));
diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.flush_rows.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.flush_rows.js
new file mode 100644
index 00000000..751fdebf
--- /dev/null
+++ b/owl-bot-staging/v1/samples/generated/v1/big_query_write.flush_rows.js
@@ -0,0 +1,63 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+
+
+'use strict';
+
+function main(writeStream) {
+  // [START bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async]
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   */
+  /**
+   * Required. The stream that is the target of the flush operation.
+   */
+  // const writeStream = 'abc123'
+  /**
+   * Ending offset of the flush operation. Rows before this offset (including
+   * this offset) will be flushed.
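Since `offset` is a wrapped `google.protobuf.Int64Value`, a flush to a specific row can be sketched as follows (inside an async function; `client` is an instantiated `BigQueryWriteClient` and `bufferedStream` is assumed to name a stream created with type `BUFFERED`):

```js
// Sketch only: flushes rows 0..9 of an existing BUFFERED stream.
const [flushed] = await client.flushRows({
  writeStream: bufferedStream,
  offset: {value: 9}, // Int64Value wrapper; the offset itself is included
});
console.log(`rows up to offset ${flushed.offset} are now readable`);
```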
+ */ + // const offset = {} + + // Imports the Storage library + const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1; + + // Instantiates a client + const storageClient = new BigQueryWriteClient(); + + async function callFlushRows() { + // Construct request + const request = { + writeStream, + }; + + // Run request + const response = await storageClient.flushRows(request); + console.log(response); + } + + callFlushRows(); + // [END bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.get_write_stream.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.get_write_stream.js new file mode 100644 index 00000000..16507508 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated/v1/big_query_write.get_write_stream.js @@ -0,0 +1,59 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(name) { + // [START bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Name of the stream to get, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. 
+   */
+  // const name = 'abc123'
+
+  // Imports the Storage library
+  const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1;
+
+  // Instantiates a client
+  const storageClient = new BigQueryWriteClient();
+
+  async function callGetWriteStream() {
+    // Construct request
+    const request = {
+      name,
+    };
+
+    // Run request
+    const response = await storageClient.getWriteStream(request);
+    console.log(response);
+  }
+
+  callGetWriteStream();
+  // [END bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async]
+}
+
+process.on('unhandledRejection', err => {
+  console.error(err.message);
+  process.exitCode = 1;
+});
+main(...process.argv.slice(2));
diff --git a/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json b/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json
new file mode 100644
index 00000000..a847f738
--- /dev/null
+++ b/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json
@@ -0,0 +1,415 @@
+{
+    "clientLibrary": {
+        "name": "nodejs-storage",
+        "version": "0.1.0",
+        "language": "TYPESCRIPT",
+        "apis": [
+            {
+                "id": "google.cloud.bigquery.storage.v1",
+                "version": "v1"
+            }
+        ]
+    },
+    "snippets": [
+        {
+            "regionTag": "bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async",
+            "title": "BigQueryRead createReadSession Sample",
+            "origin": "API_DEFINITION",
+            "description": " Creates a new read session. A read session divides the contents of a BigQuery table into one or more streams, which can then be used to read data from the table. The read session also specifies properties of the data to be read, such as a list of columns or a push-down filter describing the rows to be returned. A particular row can be read by at most one stream. When the caller has reached the end of each stream in the session, then all the data in the table has been read. Data is assigned to each stream such that roughly the same number of rows can be read from each stream. Because the server-side unit for assigning data is collections of rows, the API does not guarantee that each stream will return the same number of rows. Additionally, the limits are enforced based on the number of pre-filtered rows, so some filters can lead to lopsided assignments.
Read sessions automatically expire 6 hours after they are created and do not require manual clean-up by the caller.", + "canonical": true, + "file": "big_query_read.create_read_session.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 66, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateReadSession", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.CreateReadSession", + "async": true, + "parameters": [ + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "read_session", + "type": ".google.cloud.bigquery.storage.v1.ReadSession" + }, + { + "name": "max_stream_count", + "type": "TYPE_INT32" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.ReadSession", + "client": { + "shortName": "BigQueryReadClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryReadClient" + }, + "method": { + "shortName": "CreateReadSession", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.CreateReadSession", + "service": { + "shortName": "BigQueryRead", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_ReadRows_async", + "title": "BigQueryRead readRows Sample", + "origin": "API_DEFINITION", + "description": " Reads rows from the stream in the format prescribed by the ReadSession. Each response contains one or more table rows, up to a maximum of 100 MiB per response; read requests which attempt to read individual rows larger than 100 MiB will fail. Each request also returns a set of stream statistics reflecting the current state of the stream.", + "canonical": true, + "file": "big_query_read.read_rows.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 58, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ReadRows", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.ReadRows", + "async": true, + "parameters": [ + { + "name": "read_stream", + "type": "TYPE_STRING" + }, + { + "name": "offset", + "type": "TYPE_INT64" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.ReadRowsResponse", + "client": { + "shortName": "BigQueryReadClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryReadClient" + }, + "method": { + "shortName": "ReadRows", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.ReadRows", + "service": { + "shortName": "BigQueryRead", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async", + "title": "BigQueryRead splitReadStream Sample", + "origin": "API_DEFINITION", + "description": " Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are referred to as the primary and the residual streams of the split. The original `ReadStream` can still be read from in the same manner as before. Both of the returned `ReadStream` objects can also be read from, and the rows returned by both child streams will be the same as the rows read from the original stream. Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. 
Concretely, it is guaranteed that for streams original, primary, and residual, that original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read to completion.",
+            "canonical": true,
+            "file": "big_query_read.split_read_stream.js",
+            "language": "JAVASCRIPT",
+            "segments": [
+                {
+                    "start": 25,
+                    "end": 60,
+                    "type": "FULL"
+                }
+            ],
+            "clientMethod": {
+                "shortName": "SplitReadStream",
+                "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.SplitReadStream",
+                "async": true,
+                "parameters": [
+                    {
+                        "name": "name",
+                        "type": "TYPE_STRING"
+                    },
+                    {
+                        "name": "fraction",
+                        "type": "TYPE_DOUBLE"
+                    }
+                ],
+                "resultType": ".google.cloud.bigquery.storage.v1.SplitReadStreamResponse",
+                "client": {
+                    "shortName": "BigQueryReadClient",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryReadClient"
+                },
+                "method": {
+                    "shortName": "SplitReadStream",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.SplitReadStream",
+                    "service": {
+                        "shortName": "BigQueryRead",
+                        "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead"
+                    }
+                }
+            }
+        },
+        {
+            "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async",
+            "title": "BigQueryWrite createWriteStream Sample",
+            "origin": "API_DEFINITION",
+            "description": " Creates a write stream to the given table. Additionally, every table has a special stream named '_default' to which data can be written. This stream doesn't need to be created using CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. Data written to this stream is considered committed as soon as an acknowledgement is received.",
+            "canonical": true,
+            "file": "big_query_write.create_write_stream.js",
+            "language": "JAVASCRIPT",
+            "segments": [
+                {
+                    "start": 25,
+                    "end": 56,
+                    "type": "FULL"
+                }
+            ],
+            "clientMethod": {
+                "shortName": "CreateWriteStream",
+                "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.CreateWriteStream",
+                "async": true,
+                "parameters": [
+                    {
+                        "name": "parent",
+                        "type": "TYPE_STRING"
+                    },
+                    {
+                        "name": "write_stream",
+                        "type": ".google.cloud.bigquery.storage.v1.WriteStream"
+                    }
+                ],
+                "resultType": ".google.cloud.bigquery.storage.v1.WriteStream",
+                "client": {
+                    "shortName": "BigQueryWriteClient",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient"
+                },
+                "method": {
+                    "shortName": "CreateWriteStream",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.CreateWriteStream",
+                    "service": {
+                        "shortName": "BigQueryWrite",
+                        "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite"
+                    }
+                }
+            }
+        },
+        {
+            "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async",
+            "title": "BigQueryWrite appendRows Sample",
+            "origin": "API_DEFINITION",
+            "description": " Appends data to the given stream. If `offset` is specified, the `offset` is checked against the end of stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an attempt is made to append to an offset beyond the current end of the stream or `ALREADY_EXISTS` if user provides an `offset` that has already been written to. User can retry with adjusted offset within the same RPC connection. If `offset` is not specified, append happens at the end of the stream. The response contains an optional offset at which the append happened. No offset information will be returned for appends to a default stream. Responses are received in the same order in which requests are sent. There will be one response for each successfully inserted request.
Responses may optionally embed error information if the originating AppendRowsRequest was not successfully processed. The specifics of when successfully appended data is made visible to the table are governed by the type of stream: * For COMMITTED streams (which includes the default stream), data is visible immediately upon successful append. * For BUFFERED streams, data is made visible via a subsequent `FlushRows` rpc which advances a cursor to a newer offset in the stream. * For PENDING streams, data is not made visible until the stream itself is finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly committed via the `BatchCommitWriteStreams` rpc. Note: For users coding against the gRPC API directly, it may be necessary to supply the x-goog-request-params system parameter with `write_stream=<full_write_stream_name>`. More information about system parameters: https://cloud.google.com/apis/docs/system-parameters",
+            "canonical": true,
+            "file": "big_query_write.append_rows.js",
+            "language": "JAVASCRIPT",
+            "segments": [
+                {
+                    "start": 25,
+                    "end": 77,
+                    "type": "FULL"
+                }
+            ],
+            "clientMethod": {
+                "shortName": "AppendRows",
+                "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.AppendRows",
+                "async": true,
+                "parameters": [
+                    {
+                        "name": "write_stream",
+                        "type": "TYPE_STRING"
+                    },
+                    {
+                        "name": "offset",
+                        "type": ".google.protobuf.Int64Value"
+                    },
+                    {
+                        "name": "proto_rows",
+                        "type": ".google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData"
+                    },
+                    {
+                        "name": "trace_id",
+                        "type": "TYPE_STRING"
+                    }
+                ],
+                "resultType": ".google.cloud.bigquery.storage.v1.AppendRowsResponse",
+                "client": {
+                    "shortName": "BigQueryWriteClient",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient"
+                },
+                "method": {
+                    "shortName": "AppendRows",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.AppendRows",
+                    "service": {
+                        "shortName": "BigQueryWrite",
+                        "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite"
+                    }
+                }
+            }
+        },
+        {
+            "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async",
+            "title": "BigQueryWrite getWriteStream Sample",
+            "origin": "API_DEFINITION",
+            "description": " Gets information about a write stream.",
+            "canonical": true,
+            "file": "big_query_write.get_write_stream.js",
+            "language": "JAVASCRIPT",
+            "segments": [
+                {
+                    "start": 25,
+                    "end": 51,
+                    "type": "FULL"
+                }
+            ],
+            "clientMethod": {
+                "shortName": "GetWriteStream",
+                "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.GetWriteStream",
+                "async": true,
+                "parameters": [
+                    {
+                        "name": "name",
+                        "type": "TYPE_STRING"
+                    }
+                ],
+                "resultType": ".google.cloud.bigquery.storage.v1.WriteStream",
+                "client": {
+                    "shortName": "BigQueryWriteClient",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient"
+                },
+                "method": {
+                    "shortName": "GetWriteStream",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.GetWriteStream",
+                    "service": {
+                        "shortName": "BigQueryWrite",
+                        "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite"
+                    }
+                }
+            }
+        },
+        {
+            "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async",
+            "title": "BigQueryWrite finalizeWriteStream Sample",
+            "origin": "API_DEFINITION",
+            "description": " Finalize a write stream so that no new data can be appended to the stream.
Finalize is not supported on the '_default' stream.",
+            "canonical": true,
+            "file": "big_query_write.finalize_write_stream.js",
+            "language": "JAVASCRIPT",
+            "segments": [
+                {
+                    "start": 25,
+                    "end": 51,
+                    "type": "FULL"
+                }
+            ],
+            "clientMethod": {
+                "shortName": "FinalizeWriteStream",
+                "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FinalizeWriteStream",
+                "async": true,
+                "parameters": [
+                    {
+                        "name": "name",
+                        "type": "TYPE_STRING"
+                    }
+                ],
+                "resultType": ".google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse",
+                "client": {
+                    "shortName": "BigQueryWriteClient",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient"
+                },
+                "method": {
+                    "shortName": "FinalizeWriteStream",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FinalizeWriteStream",
+                    "service": {
+                        "shortName": "BigQueryWrite",
+                        "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite"
+                    }
+                }
+            }
+        },
+        {
+            "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async",
+            "title": "BigQueryWrite batchCommitWriteStreams Sample",
+            "origin": "API_DEFINITION",
+            "description": " Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams must be finalized before commit and cannot be committed multiple times. Once a stream is committed, data in the stream becomes available for read operations.",
+            "canonical": true,
+            "file": "big_query_write.batch_commit_write_streams.js",
+            "language": "JAVASCRIPT",
+            "segments": [
+                {
+                    "start": 25,
+                    "end": 56,
+                    "type": "FULL"
+                }
+            ],
+            "clientMethod": {
+                "shortName": "BatchCommitWriteStreams",
+                "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.BatchCommitWriteStreams",
+                "async": true,
+                "parameters": [
+                    {
+                        "name": "parent",
+                        "type": "TYPE_STRING"
+                    },
+                    {
+                        "name": "write_streams",
+                        "type": "TYPE_STRING[]"
+                    }
+                ],
+                "resultType": ".google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse",
+                "client": {
+                    "shortName": "BigQueryWriteClient",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient"
+                },
+                "method": {
+                    "shortName": "BatchCommitWriteStreams",
+                    "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.BatchCommitWriteStreams",
+                    "service": {
+                        "shortName": "BigQueryWrite",
+                        "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite"
+                    }
+                }
+            }
+        },
+        {
+            "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async",
+            "title": "BigQueryWrite flushRows Sample",
+            "origin": "API_DEFINITION",
+            "description": " Flushes rows to a BUFFERED stream. If users are appending rows to a BUFFERED stream, the flush operation is required in order for the rows to become available for reading. A Flush operation flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in the request.
Flush is not supported on the _default stream, since it is not BUFFERED.", + "canonical": true, + "file": "big_query_write.flush_rows.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 55, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "FlushRows", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FlushRows", + "async": true, + "parameters": [ + { + "name": "write_stream", + "type": "TYPE_STRING" + }, + { + "name": "offset", + "type": ".google.protobuf.Int64Value" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1.FlushRowsResponse", + "client": { + "shortName": "BigQueryWriteClient", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" + }, + "method": { + "shortName": "FlushRows", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FlushRows", + "service": { + "shortName": "BigQueryWrite", + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" + } + } + } + } + ] +} diff --git a/owl-bot-staging/v1/src/index.ts b/owl-bot-staging/v1/src/index.ts new file mode 100644 index 00000000..b0d739d4 --- /dev/null +++ b/owl-bot-staging/v1/src/index.ts @@ -0,0 +1,27 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v1 from './v1'; +const BigQueryReadClient = v1.BigQueryReadClient; +type BigQueryReadClient = v1.BigQueryReadClient; +const BigQueryWriteClient = v1.BigQueryWriteClient; +type BigQueryWriteClient = v1.BigQueryWriteClient; +export {v1, BigQueryReadClient, BigQueryWriteClient}; +export default {v1, BigQueryReadClient, BigQueryWriteClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/owl-bot-staging/v1/src/v1/big_query_read_client.ts b/owl-bot-staging/v1/src/v1/big_query_read_client.ts new file mode 100644 index 00000000..2f22944c --- /dev/null +++ b/owl-bot-staging/v1/src/v1/big_query_read_client.ts @@ -0,0 +1,816 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
**
+
+/* global window */
+import * as gax from 'google-gax';
+import {Callback, CallOptions, Descriptors, ClientOptions, GoogleError} from 'google-gax';
+
+import { PassThrough } from 'stream';
+import * as protos from '../../protos/protos';
+import jsonProtos = require('../../protos/protos.json');
+/**
+ * Client JSON configuration object, loaded from
+ * `src/v1/big_query_read_client_config.json`.
+ * This file defines retry strategy and timeouts for all API methods in this library.
+ */
+import * as gapicConfig from './big_query_read_client_config.json';
+
+const version = require('../../../package.json').version;
+
+/**
+ *  BigQuery Read API.
+ *
+ *  The Read API can be used to read data from BigQuery.
+ * @class
+ * @memberof v1
+ */
+export class BigQueryReadClient {
+  private _terminated = false;
+  private _opts: ClientOptions;
+  private _providedCustomServicePath: boolean;
+  private _gaxModule: typeof gax | typeof gax.fallback;
+  private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient;
+  private _protos: {};
+  private _defaults: {[method: string]: gax.CallSettings};
+  auth: gax.GoogleAuth;
+  descriptors: Descriptors = {
+    page: {},
+    stream: {},
+    longrunning: {},
+    batching: {},
+  };
+  warn: (code: string, message: string, warnType?: string) => void;
+  innerApiCalls: {[name: string]: Function};
+  pathTemplates: {[name: string]: gax.PathTemplate};
+  bigQueryReadStub?: Promise<{[name: string]: Function}>;
+
+  /**
+   * Construct an instance of BigQueryReadClient.
+   *
+   * @param {object} [options] - The configuration object.
+   * The options accepted by the constructor are described in detail
+   * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance).
+   * The common options are:
+   * @param {object} [options.credentials] - Credentials object.
+   * @param {string} [options.credentials.client_email]
+   * @param {string} [options.credentials.private_key]
+   * @param {string} [options.email] - Account email address. Required when
+   *     using a .pem or .p12 keyFilename.
+   * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+   *     .p12 key downloaded from the Google Developers Console. If you provide
+   *     a path to a JSON file, the projectId option below is not necessary.
+   *     NOTE: .pem and .p12 require you to specify options.email as well.
+   * @param {number} [options.port] - The port on which to connect to
+   *     the remote host.
+   * @param {string} [options.projectId] - The project ID from the Google
+   *     Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+   *     the environment variable GCLOUD_PROJECT for your project ID. If your
+   *     app is running in an environment which supports
+   *     {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+   *     your project ID will be detected automatically.
+   * @param {string} [options.apiEndpoint] - The domain name of the
+   *     API remote host.
+   * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override.
+   *     Follows the structure of {@link gapicConfig}.
+   * @param {boolean} [options.fallback] - Use HTTP fallback mode.
+   *     In fallback mode, a special browser-compatible transport implementation is used
+   *     instead of gRPC transport. In browser context (if the `window` object is defined)
+   *     the fallback mode is enabled automatically; set `options.fallback` to `false`
+   *     if you need to override this behavior.
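As a concrete (and entirely optional) illustration of the options listed above, a sketch of a fully spelled-out constructor call; every field may be omitted, and the endpoint shown is simply the public default:

```js
// Hypothetical construction; all options are optional overrides.
const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1;

const client = new BigQueryReadClient({
  apiEndpoint: 'bigquerystorage.googleapis.com', // the default service address
  projectId: 'my-project',           // else resolved from the environment
  keyFilename: '/path/to/key.json',  // else Application Default Credentials
  fallback: false,                   // force gRPC even where `window` exists
});
```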
+ */ + constructor(opts?: ClientOptions) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof BigQueryReadClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gax.fallback : gax; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. + this.pathTemplates = { + projectPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}' + ), + readSessionPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/sessions/{session}' + ), + readStreamPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/sessions/{session}/streams/{stream}' + ), + tablePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/datasets/{dataset}/tables/{table}' + ), + writeStreamPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}' + ), + }; + + // Some of the methods on this service provide streaming responses. + // Provide descriptors for these. + this.descriptors.stream = { + readRows: new this._gaxModule.StreamDescriptor(gax.StreamType.SERVER_STREAMING, opts.fallback === 'rest') + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.storage.v1.BigQueryRead', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = gax.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.bigQueryReadStub) { + return this.bigQueryReadStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.storage.v1.BigQueryRead. + this.bigQueryReadStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.storage.v1.BigQueryRead') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.storage.v1.BigQueryRead, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const bigQueryReadStubMethods = + ['createReadSession', 'readRows', 'splitReadStream']; + for (const methodName of bigQueryReadStubMethods) { + const callPromise = this.bigQueryReadStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + if (methodName in this.descriptors.stream) { + const stream = new PassThrough(); + setImmediate(() => { + stream.emit('error', new GoogleError('The client has already been closed.')); + }); + return stream; + } + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.stream[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.bigQueryReadStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'bigquerystorage.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'bigquerystorage.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. 
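Because initialization is lazy, a caller that prefers to pay the startup cost up front can await it explicitly; a small sketch using the static getters defined here:

```js
// Minimal sketch: eager initialization instead of lazy, per initialize() above.
const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1;

async function warmUp() {
  const client = new BigQueryReadClient();
  await client.initialize(); // resolves to the authenticated service stub
  // Static service metadata, as defined by the getters above:
  console.log(BigQueryReadClient.servicePath); // 'bigquerystorage.googleapis.com'
  console.log(BigQueryReadClient.port);        // 443
  return client;
}
```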
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * Creates a new read session. A read session divides the contents of a
+ * BigQuery table into one or more streams, which can then be used to read
+ * data from the table. The read session also specifies properties of the
+ * data to be read, such as a list of columns or a push-down filter describing
+ * the rows to be returned.
+ *
+ * A particular row can be read by at most one stream. When the caller has
+ * reached the end of each stream in the session, then all the data in the
+ * table has been read.
+ *
+ * Data is assigned to each stream such that roughly the same number of
+ * rows can be read from each stream. Because the server-side unit for
+ * assigning data is collections of rows, the API does not guarantee that
+ * each stream will return the same number of rows. Additionally, the
+ * limits are enforced based on the number of pre-filtered rows, so some
+ * filters can lead to lopsided assignments.
+ *
+ * Read sessions automatically expire 6 hours after they are created and do
+ * not require manual clean-up by the caller.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.parent
+ *   Required. The request project that owns the session, in the form of
+ *   `projects/{project_id}`.
+ * @param {google.cloud.bigquery.storage.v1.ReadSession} request.readSession
+ *   Required. Session to be created.
+ * @param {number} request.maxStreamCount
+ *   Max initial number of streams. If unset or zero, the server will
+ *   provide a value of streams so as to produce reasonable throughput. Must be
+ *   non-negative. The number of streams may be lower than the requested number,
+ *   depending on the amount of parallelism that is reasonable for the table.
+ *   Error will be returned if the max count is greater than the current system
+ *   max limit of 1,000.
+ *
+ *   Streams must be read starting from offset 0.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing [ReadSession]{@link google.cloud.bigquery.storage.v1.ReadSession}.
+ *   Please see the
+ *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
+ *   for more details and examples.
+ * @example include:samples/generated/v1/big_query_read.create_read_session.js
+ * region_tag:bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async
+ */
+  createReadSession(
+      request?: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.cloud.bigquery.storage.v1.IReadSession,
+        protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|undefined, {}|undefined
+      ]>;
+  createReadSession(
+      request: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest,
+      options: CallOptions,
+      callback: Callback<
+          protos.google.cloud.bigquery.storage.v1.IReadSession,
+          protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|null|undefined,
+          {}|null|undefined>): void;
+  createReadSession(
+      request: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest,
+      callback: Callback<
+          protos.google.cloud.bigquery.storage.v1.IReadSession,
+          protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|null|undefined,
+          {}|null|undefined>): void;
+  createReadSession(
+      request?: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          protos.google.cloud.bigquery.storage.v1.IReadSession,
+          protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          protos.google.cloud.bigquery.storage.v1.IReadSession,
+          protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        protos.google.cloud.bigquery.storage.v1.IReadSession,
+        protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = gax.routingHeader.fromParams({
+      'read_session.table': request.readSession!.table || '',
+    });
+    this.initialize();
+    return this.innerApiCalls.createReadSession(request, options, callback);
+  }
+/**
+ * Splits a given `ReadStream` into two `ReadStream` objects. These
+ * `ReadStream` objects are referred to as the primary and the residual
+ * streams of the split. The original `ReadStream` can still be read from in
+ * the same manner as before. Both of the returned `ReadStream` objects can
+ * also be read from, and the rows returned by both child streams will be
+ * the same as the rows read from the original stream.
+ *
+ * Moreover, the two child streams will be allocated back-to-back in the
+ * original `ReadStream`. Concretely, it is guaranteed that for streams
+ * original, primary, and residual, original[0-j] = primary[0-j] and
+ * original[j-n] = residual[0-m] once the streams have been read to
+ * completion.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.name
+ *   Required. Name of the stream to split.
+ * @param {number} request.fraction
+ *   A value in the range (0.0, 1.0) that specifies the fractional point at
+ *   which the original stream should be split.
The actual split point is + * evaluated on pre-filtered rows, so if a filter is provided, then there is + * no guarantee that the division of the rows between the new child streams + * will be proportional to this fractional value. Additionally, because the + * server-side unit for assigning data is collections of rows, this fraction + * will always map to a data storage boundary on the server side. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [SplitReadStreamResponse]{@link google.cloud.bigquery.storage.v1.SplitReadStreamResponse}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. + * @example include:samples/generated/v1/big_query_read.split_read_stream.js + * region_tag:bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async + */ + splitReadStream( + request?: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|undefined, {}|undefined + ]>; + splitReadStream( + request: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|null|undefined, + {}|null|undefined>): void; + splitReadStream( + request: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest, + callback: Callback< + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|null|undefined, + {}|null|undefined>): void; + splitReadStream( + request?: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'name': request.name || '', + }); + this.initialize(); + return this.innerApiCalls.splitReadStream(request, options, callback); + } + +/** + * Reads rows from the stream in the format prescribed by the ReadSession. 
+ * Each response contains one or more table rows, up to a maximum of 100 MiB + * per response; read requests which attempt to read individual rows larger + * than 100 MiB will fail. + * + * Each request also returns a set of stream statistics reflecting the current + * state of the stream. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.readStream + * Required. Stream to read rows from. + * @param {number} request.offset + * The offset requested must be less than the last row read from Read. + * Requesting a larger offset is undefined. If not specified, start reading + * from offset zero. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits [ReadRowsResponse]{@link google.cloud.bigquery.storage.v1.ReadRowsResponse} on 'data' event. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#server-streaming) + * for more details and examples. + * @example include:samples/generated/v1/big_query_read.read_rows.js + * region_tag:bigquerystorage_v1_generated_BigQueryRead_ReadRows_async + */ + readRows( + request?: protos.google.cloud.bigquery.storage.v1.IReadRowsRequest, + options?: CallOptions): + gax.CancellableStream{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'read_stream': request.readStream || '', + }); + this.initialize(); + return this.innerApiCalls.readRows(request, options); + } + + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified project resource name string. + * + * @param {string} project + * @returns {string} Resource name string. + */ + projectPath(project:string) { + return this.pathTemplates.projectPathTemplate.render({ + project: project, + }); + } + + /** + * Parse the project from Project resource. + * + * @param {string} projectName + * A fully-qualified path representing Project resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectName(projectName: string) { + return this.pathTemplates.projectPathTemplate.match(projectName).project; + } + + /** + * Return a fully-qualified readSession resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} session + * @returns {string} Resource name string. + */ + readSessionPath(project:string,location:string,session:string) { + return this.pathTemplates.readSessionPathTemplate.render({ + project: project, + location: location, + session: session, + }); + } + + /** + * Parse the project from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the project. + */ + matchProjectFromReadSessionName(readSessionName: string) { + return this.pathTemplates.readSessionPathTemplate.match(readSessionName).project; + } + + /** + * Parse the location from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the location. 
+ */ + matchLocationFromReadSessionName(readSessionName: string) { + return this.pathTemplates.readSessionPathTemplate.match(readSessionName).location; + } + + /** + * Parse the session from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the session. + */ + matchSessionFromReadSessionName(readSessionName: string) { + return this.pathTemplates.readSessionPathTemplate.match(readSessionName).session; + } + + /** + * Return a fully-qualified readStream resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} session + * @param {string} stream + * @returns {string} Resource name string. + */ + readStreamPath(project:string,location:string,session:string,stream:string) { + return this.pathTemplates.readStreamPathTemplate.render({ + project: project, + location: location, + session: session, + stream: stream, + }); + } + + /** + * Parse the project from ReadStream resource. + * + * @param {string} readStreamName + * A fully-qualified path representing ReadStream resource. + * @returns {string} A string representing the project. + */ + matchProjectFromReadStreamName(readStreamName: string) { + return this.pathTemplates.readStreamPathTemplate.match(readStreamName).project; + } + + /** + * Parse the location from ReadStream resource. + * + * @param {string} readStreamName + * A fully-qualified path representing ReadStream resource. + * @returns {string} A string representing the location. + */ + matchLocationFromReadStreamName(readStreamName: string) { + return this.pathTemplates.readStreamPathTemplate.match(readStreamName).location; + } + + /** + * Parse the session from ReadStream resource. + * + * @param {string} readStreamName + * A fully-qualified path representing ReadStream resource. + * @returns {string} A string representing the session. + */ + matchSessionFromReadStreamName(readStreamName: string) { + return this.pathTemplates.readStreamPathTemplate.match(readStreamName).session; + } + + /** + * Parse the stream from ReadStream resource. + * + * @param {string} readStreamName + * A fully-qualified path representing ReadStream resource. + * @returns {string} A string representing the stream. + */ + matchStreamFromReadStreamName(readStreamName: string) { + return this.pathTemplates.readStreamPathTemplate.match(readStreamName).stream; + } + + /** + * Return a fully-qualified table resource name string. + * + * @param {string} project + * @param {string} dataset + * @param {string} table + * @returns {string} Resource name string. + */ + tablePath(project:string,dataset:string,table:string) { + return this.pathTemplates.tablePathTemplate.render({ + project: project, + dataset: dataset, + table: table, + }); + } + + /** + * Parse the project from Table resource. + * + * @param {string} tableName + * A fully-qualified path representing Table resource. + * @returns {string} A string representing the project. + */ + matchProjectFromTableName(tableName: string) { + return this.pathTemplates.tablePathTemplate.match(tableName).project; + } + + /** + * Parse the dataset from Table resource. + * + * @param {string} tableName + * A fully-qualified path representing Table resource. + * @returns {string} A string representing the dataset. + */ + matchDatasetFromTableName(tableName: string) { + return this.pathTemplates.tablePathTemplate.match(tableName).dataset; + } + + /** + * Parse the table from Table resource. 
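+   *
+   * For example, given a name produced by `tablePath()` above
+   * (placeholder IDs):
+   *
+   * ```
+   * client.matchTableFromTableName(
+   *     'projects/my-project/datasets/my_dataset/tables/my_table');
+   * // => 'my_table'
+   * ```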
+ * + * @param {string} tableName + * A fully-qualified path representing Table resource. + * @returns {string} A string representing the table. + */ + matchTableFromTableName(tableName: string) { + return this.pathTemplates.tablePathTemplate.match(tableName).table; + } + + /** + * Return a fully-qualified writeStream resource name string. + * + * @param {string} project + * @param {string} dataset + * @param {string} table + * @param {string} stream + * @returns {string} Resource name string. + */ + writeStreamPath(project:string,dataset:string,table:string,stream:string) { + return this.pathTemplates.writeStreamPathTemplate.render({ + project: project, + dataset: dataset, + table: table, + stream: stream, + }); + } + + /** + * Parse the project from WriteStream resource. + * + * @param {string} writeStreamName + * A fully-qualified path representing WriteStream resource. + * @returns {string} A string representing the project. + */ + matchProjectFromWriteStreamName(writeStreamName: string) { + return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).project; + } + + /** + * Parse the dataset from WriteStream resource. + * + * @param {string} writeStreamName + * A fully-qualified path representing WriteStream resource. + * @returns {string} A string representing the dataset. + */ + matchDatasetFromWriteStreamName(writeStreamName: string) { + return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).dataset; + } + + /** + * Parse the table from WriteStream resource. + * + * @param {string} writeStreamName + * A fully-qualified path representing WriteStream resource. + * @returns {string} A string representing the table. + */ + matchTableFromWriteStreamName(writeStreamName: string) { + return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).table; + } + + /** + * Parse the stream from WriteStream resource. + * + * @param {string} writeStreamName + * A fully-qualified path representing WriteStream resource. + * @returns {string} A string representing the stream. + */ + matchStreamFromWriteStreamName(writeStreamName: string) { + return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).stream; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
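+   *
+   * A minimal shutdown sketch (assuming an already-constructed client,
+   * inside an async function):
+   *
+   * ```
+   * await client.close();
+   * // Any call made after this point fails with
+   * // 'The client has already been closed.'
+   * ```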
+   */
+  close(): Promise<void> {
+    if (this.bigQueryReadStub && !this._terminated) {
+      return this.bigQueryReadStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/owl-bot-staging/v1/src/v1/big_query_read_client_config.json b/owl-bot-staging/v1/src/v1/big_query_read_client_config.json
new file mode 100644
index 00000000..42b2735b
--- /dev/null
+++ b/owl-bot-staging/v1/src/v1/big_query_read_client_config.json
@@ -0,0 +1,44 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.storage.v1.BigQueryRead": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ],
+        "unavailable": [
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "CreateReadSession": {
+          "timeout_millis": 600000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        },
+        "ReadRows": {
+          "timeout_millis": 86400000,
+          "retry_codes_name": "unavailable",
+          "retry_params_name": "default"
+        },
+        "SplitReadStream": {
+          "timeout_millis": 600000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/owl-bot-staging/v1/src/v1/big_query_read_proto_list.json b/owl-bot-staging/v1/src/v1/big_query_read_proto_list.json
new file mode 100644
index 00000000..f0274ac3
--- /dev/null
+++ b/owl-bot-staging/v1/src/v1/big_query_read_proto_list.json
@@ -0,0 +1,8 @@
+[
+  "../../protos/google/cloud/bigquery/storage/v1/arrow.proto",
+  "../../protos/google/cloud/bigquery/storage/v1/avro.proto",
+  "../../protos/google/cloud/bigquery/storage/v1/protobuf.proto",
+  "../../protos/google/cloud/bigquery/storage/v1/storage.proto",
+  "../../protos/google/cloud/bigquery/storage/v1/stream.proto",
+  "../../protos/google/cloud/bigquery/storage/v1/table.proto"
+]
diff --git a/owl-bot-staging/v1/src/v1/big_query_write_client.ts b/owl-bot-staging/v1/src/v1/big_query_write_client.ts
new file mode 100644
index 00000000..622e758e
--- /dev/null
+++ b/owl-bot-staging/v1/src/v1/big_query_write_client.ts
@@ -0,0 +1,1028 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+/* global window */
+import * as gax from 'google-gax';
+import {Callback, CallOptions, Descriptors, ClientOptions, GoogleError} from 'google-gax';
+
+import { PassThrough } from 'stream';
+import * as protos from '../../protos/protos';
+import jsonProtos = require('../../protos/protos.json');
+/**
+ * Client JSON configuration object, loaded from
+ * `src/v1/big_query_write_client_config.json`.
+ * This file defines retry strategy and timeouts for all API methods in this library.
+ */
+import * as gapicConfig from './big_query_write_client_config.json';
+
+const version = require('../../../package.json').version;
+
+/**
+ *  BigQuery Write API.
+ *
+ *  The Write API can be used to write data to BigQuery.
+ *
+ *  For supplementary information about the Write API, see:
+ *  https://cloud.google.com/bigquery/docs/write-api
+ * @class
+ * @memberof v1
+ */
+export class BigQueryWriteClient {
+  private _terminated = false;
+  private _opts: ClientOptions;
+  private _providedCustomServicePath: boolean;
+  private _gaxModule: typeof gax | typeof gax.fallback;
+  private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient;
+  private _protos: {};
+  private _defaults: {[method: string]: gax.CallSettings};
+  auth: gax.GoogleAuth;
+  descriptors: Descriptors = {
+    page: {},
+    stream: {},
+    longrunning: {},
+    batching: {},
+  };
+  warn: (code: string, message: string, warnType?: string) => void;
+  innerApiCalls: {[name: string]: Function};
+  pathTemplates: {[name: string]: gax.PathTemplate};
+  bigQueryWriteStub?: Promise<{[name: string]: Function}>;
+
+  /**
+   * Construct an instance of BigQueryWriteClient.
+   *
+   * @param {object} [options] - The configuration object.
+   * The options accepted by the constructor are described in detail
+   * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance).
+   * The common options are:
+   * @param {object} [options.credentials] - Credentials object.
+   * @param {string} [options.credentials.client_email]
+   * @param {string} [options.credentials.private_key]
+   * @param {string} [options.email] - Account email address. Required when
+   *     using a .pem or .p12 keyFilename.
+   * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+   *     .p12 key downloaded from the Google Developers Console. If you provide
+   *     a path to a JSON file, the projectId option below is not necessary.
+   *     NOTE: .pem and .p12 require you to specify options.email as well.
+   * @param {number} [options.port] - The port on which to connect to
+   *     the remote host.
+   * @param {string} [options.projectId] - The project ID from the Google
+   *     Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+   *     the environment variable GCLOUD_PROJECT for your project ID. If your
+   *     app is running in an environment which supports
+   *     {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+   *     your project ID will be detected automatically.
+   * @param {string} [options.apiEndpoint] - The domain name of the
+   *     API remote host.
+   * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override.
+   *     Follows the structure of {@link gapicConfig}.
+   * @param {boolean} [options.fallback] - Use HTTP fallback mode.
+   *     In fallback mode, a special browser-compatible transport implementation is used
+   *     instead of gRPC transport. In browser context (if the `window` object is defined)
+   *     the fallback mode is enabled automatically; set `options.fallback` to `false`
+   *     if you need to override this behavior.
+   */
+  constructor(opts?: ClientOptions) {
+    // Ensure that options include all the required fields.
+ const staticMembers = this.constructor as typeof BigQueryWriteClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gax.fallback : gax; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. + this.pathTemplates = { + projectPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}' + ), + readSessionPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/sessions/{session}' + ), + readStreamPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/sessions/{session}/streams/{stream}' + ), + tablePathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/datasets/{dataset}/tables/{table}' + ), + writeStreamPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}' + ), + }; + + // Some of the methods on this service provide streaming responses. + // Provide descriptors for these. + this.descriptors.stream = { + appendRows: new this._gaxModule.StreamDescriptor(gax.StreamType.BIDI_STREAMING, opts.fallback === 'rest') + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.storage.v1.BigQueryWrite', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = gax.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.bigQueryWriteStub) { + return this.bigQueryWriteStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.storage.v1.BigQueryWrite. + this.bigQueryWriteStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.storage.v1.BigQueryWrite') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.storage.v1.BigQueryWrite, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const bigQueryWriteStubMethods = + ['createWriteStream', 'appendRows', 'getWriteStream', 'finalizeWriteStream', 'batchCommitWriteStreams', 'flushRows']; + for (const methodName of bigQueryWriteStubMethods) { + const callPromise = this.bigQueryWriteStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + if (methodName in this.descriptors.stream) { + const stream = new PassThrough(); + setImmediate(() => { + stream.emit('error', new GoogleError('The client has already been closed.')); + }); + return stream; + } + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.stream[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.bigQueryWriteStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'bigquerystorage.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'bigquerystorage.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. 
+ * @returns {string[]} List of default scopes.
+ */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/bigquery.insertdata',
+      'https://www.googleapis.com/auth/cloud-platform'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * Creates a write stream to the given table.
+ * Additionally, every table has a special stream named '_default'
+ * to which data can be written. This stream doesn't need to be created using
+ * CreateWriteStream. It is a stream that can be used simultaneously by any
+ * number of clients. Data written to this stream is considered committed as
+ * soon as an acknowledgement is received.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.parent
+ *   Required. Reference to the table to which the stream belongs, in the format
+ *   of `projects/{project}/datasets/{dataset}/tables/{table}`.
+ * @param {google.cloud.bigquery.storage.v1.WriteStream} request.writeStream
+ *   Required. Stream to be created.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing [WriteStream]{@link google.cloud.bigquery.storage.v1.WriteStream}.
+ *   Please see the
+ *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
+ *   for more details and examples.
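+ *
+ * A minimal sketch (placeholder table path; `PENDING` is one of the
+ * stream types defined by the API, and top-level `await` assumes an
+ * async context):
+ *
+ * ```
+ * const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage');
+ * const client = new BigQueryWriteClient();
+ * const [writeStream] = await client.createWriteStream({
+ *   parent: 'projects/my-project/datasets/my_dataset/tables/my_table',
+ *   writeStream: {type: 'PENDING'},
+ * });
+ * console.log(`created ${writeStream.name}`);
+ * ```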
+ * @example include:samples/generated/v1/big_query_write.create_write_stream.js + * region_tag:bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async + */ + createWriteStream( + request?: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|undefined, {}|undefined + ]>; + createWriteStream( + request: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|null|undefined, + {}|null|undefined>): void; + createWriteStream( + request: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, + callback: Callback< + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|null|undefined, + {}|null|undefined>): void; + createWriteStream( + request?: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'parent': request.parent || '', + }); + this.initialize(); + return this.innerApiCalls.createWriteStream(request, options, callback); + } +/** + * Gets information about a write stream. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. Name of the stream to get, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [WriteStream]{@link google.cloud.bigquery.storage.v1.WriteStream}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
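+ *
+ * For example (placeholder stream name, async context assumed):
+ *
+ * ```
+ * const [writeStream] = await client.getWriteStream({
+ *   name: 'projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream',
+ * });
+ * console.log(writeStream.type, writeStream.createTime);
+ * ```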
+ * @example include:samples/generated/v1/big_query_write.get_write_stream.js + * region_tag:bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async + */ + getWriteStream( + request?: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|undefined, {}|undefined + ]>; + getWriteStream( + request: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|null|undefined, + {}|null|undefined>): void; + getWriteStream( + request: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, + callback: Callback< + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|null|undefined, + {}|null|undefined>): void; + getWriteStream( + request?: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.storage.v1.IWriteStream, + protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'name': request.name || '', + }); + this.initialize(); + return this.innerApiCalls.getWriteStream(request, options, callback); + } +/** + * Finalize a write stream so that no new data can be appended to the + * stream. Finalize is not supported on the '_default' stream. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.name + * Required. Name of the stream to finalize, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [FinalizeWriteStreamResponse]{@link google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
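+ *
+ * A sketch of the finalize-then-commit flow for a `PENDING` stream
+ * (placeholder names, async context assumed; see also
+ * `batchCommitWriteStreams` below):
+ *
+ * ```
+ * const [finalized] = await client.finalizeWriteStream({
+ *   name: writeStream.name,
+ * });
+ * console.log(`rows written: ${finalized.rowCount}`);
+ * const [commit] = await client.batchCommitWriteStreams({
+ *   parent: 'projects/my-project/datasets/my_dataset/tables/my_table',
+ *   writeStreams: [writeStream.name],
+ * });
+ * console.log(commit.commitTime);
+ * ```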
+ * @example include:samples/generated/v1/big_query_write.finalize_write_stream.js + * region_tag:bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async + */ + finalizeWriteStream( + request?: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|undefined, {}|undefined + ]>; + finalizeWriteStream( + request: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|null|undefined, + {}|null|undefined>): void; + finalizeWriteStream( + request: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, + callback: Callback< + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|null|undefined, + {}|null|undefined>): void; + finalizeWriteStream( + request?: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, + protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'name': request.name || '', + }); + this.initialize(); + return this.innerApiCalls.finalizeWriteStream(request, options, callback); + } +/** + * Atomically commits a group of `PENDING` streams that belong to the same + * `parent` table. + * + * Streams must be finalized before commit and cannot be committed multiple + * times. Once a stream is committed, data in the stream becomes available + * for read operations. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.parent + * Required. Parent table that all the streams should belong to, in the form of + * `projects/{project}/datasets/{dataset}/tables/{table}`. + * @param {string[]} request.writeStreams + * Required. The group of streams that will be committed atomically. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [BatchCommitWriteStreamsResponse]{@link google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse}. 
+ *   Please see the
+ *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
+ *   for more details and examples.
+ * @example include:samples/generated/v1/big_query_write.batch_commit_write_streams.js
+ * region_tag:bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async
+ */
+  batchCommitWriteStreams(
+      request?: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
+        protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|undefined, {}|undefined
+      ]>;
+  batchCommitWriteStreams(
+      request: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest,
+      options: CallOptions,
+      callback: Callback<
+          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
+          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|null|undefined,
+          {}|null|undefined>): void;
+  batchCommitWriteStreams(
+      request: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest,
+      callback: Callback<
+          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
+          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|null|undefined,
+          {}|null|undefined>): void;
+  batchCommitWriteStreams(
+      request?: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
+          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
+          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
+        protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = gax.routingHeader.fromParams({
+      'parent': request.parent || '',
+    });
+    this.initialize();
+    return this.innerApiCalls.batchCommitWriteStreams(request, options, callback);
+  }
+/**
+ * Flushes rows to a BUFFERED stream.
+ *
+ * If users are appending rows to a BUFFERED stream, a flush operation is
+ * required in order for the rows to become available for reading. A
+ * flush operation flushes rows in the stream up to (and including) the
+ * offset specified in the request.
+ *
+ * Flush is not supported on the _default stream, since it is not BUFFERED.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.writeStream
+ *   Required. The stream that is the target of the flush operation.
+ * @param {google.protobuf.Int64Value} request.offset
+ *   Ending offset of the flush operation. Rows before this offset (including
+ *   this offset) will be flushed.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing [FlushRowsResponse]{@link google.cloud.bigquery.storage.v1.FlushRowsResponse}.
+ *   Please see the
+ *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
+ *   for more details and examples.
+ * @example include:samples/generated/v1/big_query_write.flush_rows.js
+ * region_tag:bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async
+ */
+  flushRows(
+      request?: protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse,
+        protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|undefined, {}|undefined
+      ]>;
+  flushRows(
+      request: protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest,
+      options: CallOptions,
+      callback: Callback<
+          protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse,
+          protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|null|undefined,
+          {}|null|undefined>): void;
+  flushRows(
+      request: protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest,
+      callback: Callback<
+          protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse,
+          protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|null|undefined,
+          {}|null|undefined>): void;
+  flushRows(
+      request?: protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse,
+          protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse,
+          protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse,
+        protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = gax.routingHeader.fromParams({
+      'write_stream': request.writeStream || '',
+    });
+    this.initialize();
+    return this.innerApiCalls.flushRows(request, options, callback);
+  }
+
+/**
+ * Appends data to the given stream.
+ *
+ * If `offset` is specified, the `offset` is checked against the end of
+ * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+ * attempt is made to append to an offset beyond the current end of the stream
+ * or `ALREADY_EXISTS` if the user provides an `offset` that has already been
+ * written to. The user can retry with an adjusted offset within the same RPC
+ * connection. If `offset` is not specified, append happens at the end of the
+ * stream.
+ *
+ * The response contains an optional offset at which the append
+ * happened. No offset information will be returned for appends to a
+ * default stream.
+ *
+ * Responses are received in the same order in which requests are sent.
+ * There will be one response for each successfully inserted request. Responses
+ * may optionally embed error information if the originating AppendRequest was
+ * not successfully processed.
+ *
+ * The specifics of when successfully appended data is made visible to the
+ * table are governed by the type of stream:
+ *
+ * * For COMMITTED streams (which includes the default stream), data is
+ * visible immediately upon successful append.
+ *
+ * * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
+ * rpc which advances a cursor to a newer offset in the stream.
+ *
+ * * For PENDING streams, data is not made visible until the stream itself is
+ * finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
+ * committed via the `BatchCommitWriteStreams` rpc.
+ *
+ * Note: For users coding against the gRPC API directly, it may be
+ * necessary to supply the x-goog-request-params system parameter
+ * with `write_stream=<full_write_stream_name>`.
+ *
+ * More information about system parameters:
+ * https://cloud.google.com/apis/docs/system-parameters
+ *
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Stream}
+ *   An object stream which is both readable and writable. It accepts objects
+ *   representing [AppendRowsRequest]{@link google.cloud.bigquery.storage.v1.AppendRowsRequest} for write() method, and
+ *   will emit objects representing [AppendRowsResponse]{@link google.cloud.bigquery.storage.v1.AppendRowsResponse} on 'data' event asynchronously.
+ *   Please see the
+ *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#bi-directional-streaming)
+ *   for more details and examples.
+ * @example include:samples/generated/v1/big_query_write.append_rows.js
+ * region_tag:bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async
+ */
+  appendRows(
+      options?: CallOptions):
+    gax.CancellableStream {
+    this.initialize();
+    return this.innerApiCalls.appendRows(null, options);
+  }
+
+  // --------------------
+  // -- Path templates --
+  // --------------------
+
+  /**
+   * Return a fully-qualified project resource name string.
+   *
+   * @param {string} project
+   * @returns {string} Resource name string.
+   */
+  projectPath(project:string) {
+    return this.pathTemplates.projectPathTemplate.render({
+      project: project,
+    });
+  }
+
+  /**
+   * Parse the project from Project resource.
+   *
+   * @param {string} projectName
+   *   A fully-qualified path representing Project resource.
+   * @returns {string} A string representing the project.
+   */
+  matchProjectFromProjectName(projectName: string) {
+    return this.pathTemplates.projectPathTemplate.match(projectName).project;
+  }
+
+  /**
+   * Return a fully-qualified readSession resource name string.
+   *
+   * @param {string} project
+   * @param {string} location
+   * @param {string} session
+   * @returns {string} Resource name string.
+   */
+  readSessionPath(project:string,location:string,session:string) {
+    return this.pathTemplates.readSessionPathTemplate.render({
+      project: project,
+      location: location,
+      session: session,
+    });
+  }
+
+  /**
+   * Parse the project from ReadSession resource.
+   *
+   * @param {string} readSessionName
+   *   A fully-qualified path representing ReadSession resource.
+   * @returns {string} A string representing the project.
+ */ + matchProjectFromReadSessionName(readSessionName: string) { + return this.pathTemplates.readSessionPathTemplate.match(readSessionName).project; + } + + /** + * Parse the location from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the location. + */ + matchLocationFromReadSessionName(readSessionName: string) { + return this.pathTemplates.readSessionPathTemplate.match(readSessionName).location; + } + + /** + * Parse the session from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the session. + */ + matchSessionFromReadSessionName(readSessionName: string) { + return this.pathTemplates.readSessionPathTemplate.match(readSessionName).session; + } + + /** + * Return a fully-qualified readStream resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} session + * @param {string} stream + * @returns {string} Resource name string. + */ + readStreamPath(project:string,location:string,session:string,stream:string) { + return this.pathTemplates.readStreamPathTemplate.render({ + project: project, + location: location, + session: session, + stream: stream, + }); + } + + /** + * Parse the project from ReadStream resource. + * + * @param {string} readStreamName + * A fully-qualified path representing ReadStream resource. + * @returns {string} A string representing the project. + */ + matchProjectFromReadStreamName(readStreamName: string) { + return this.pathTemplates.readStreamPathTemplate.match(readStreamName).project; + } + + /** + * Parse the location from ReadStream resource. + * + * @param {string} readStreamName + * A fully-qualified path representing ReadStream resource. + * @returns {string} A string representing the location. + */ + matchLocationFromReadStreamName(readStreamName: string) { + return this.pathTemplates.readStreamPathTemplate.match(readStreamName).location; + } + + /** + * Parse the session from ReadStream resource. + * + * @param {string} readStreamName + * A fully-qualified path representing ReadStream resource. + * @returns {string} A string representing the session. + */ + matchSessionFromReadStreamName(readStreamName: string) { + return this.pathTemplates.readStreamPathTemplate.match(readStreamName).session; + } + + /** + * Parse the stream from ReadStream resource. + * + * @param {string} readStreamName + * A fully-qualified path representing ReadStream resource. + * @returns {string} A string representing the stream. + */ + matchStreamFromReadStreamName(readStreamName: string) { + return this.pathTemplates.readStreamPathTemplate.match(readStreamName).stream; + } + + /** + * Return a fully-qualified table resource name string. + * + * @param {string} project + * @param {string} dataset + * @param {string} table + * @returns {string} Resource name string. + */ + tablePath(project:string,dataset:string,table:string) { + return this.pathTemplates.tablePathTemplate.render({ + project: project, + dataset: dataset, + table: table, + }); + } + + /** + * Parse the project from Table resource. + * + * @param {string} tableName + * A fully-qualified path representing Table resource. + * @returns {string} A string representing the project. 
+ */ + matchProjectFromTableName(tableName: string) { + return this.pathTemplates.tablePathTemplate.match(tableName).project; + } + + /** + * Parse the dataset from Table resource. + * + * @param {string} tableName + * A fully-qualified path representing Table resource. + * @returns {string} A string representing the dataset. + */ + matchDatasetFromTableName(tableName: string) { + return this.pathTemplates.tablePathTemplate.match(tableName).dataset; + } + + /** + * Parse the table from Table resource. + * + * @param {string} tableName + * A fully-qualified path representing Table resource. + * @returns {string} A string representing the table. + */ + matchTableFromTableName(tableName: string) { + return this.pathTemplates.tablePathTemplate.match(tableName).table; + } + + /** + * Return a fully-qualified writeStream resource name string. + * + * @param {string} project + * @param {string} dataset + * @param {string} table + * @param {string} stream + * @returns {string} Resource name string. + */ + writeStreamPath(project:string,dataset:string,table:string,stream:string) { + return this.pathTemplates.writeStreamPathTemplate.render({ + project: project, + dataset: dataset, + table: table, + stream: stream, + }); + } + + /** + * Parse the project from WriteStream resource. + * + * @param {string} writeStreamName + * A fully-qualified path representing WriteStream resource. + * @returns {string} A string representing the project. + */ + matchProjectFromWriteStreamName(writeStreamName: string) { + return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).project; + } + + /** + * Parse the dataset from WriteStream resource. + * + * @param {string} writeStreamName + * A fully-qualified path representing WriteStream resource. + * @returns {string} A string representing the dataset. + */ + matchDatasetFromWriteStreamName(writeStreamName: string) { + return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).dataset; + } + + /** + * Parse the table from WriteStream resource. + * + * @param {string} writeStreamName + * A fully-qualified path representing WriteStream resource. + * @returns {string} A string representing the table. + */ + matchTableFromWriteStreamName(writeStreamName: string) { + return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).table; + } + + /** + * Parse the stream from WriteStream resource. + * + * @param {string} writeStreamName + * A fully-qualified path representing WriteStream resource. + * @returns {string} A string representing the stream. + */ + matchStreamFromWriteStreamName(writeStreamName: string) { + return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).stream; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+   */
+  close(): Promise<void> {
+    if (this.bigQueryWriteStub && !this._terminated) {
+      return this.bigQueryWriteStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/owl-bot-staging/v1/src/v1/big_query_write_client_config.json b/owl-bot-staging/v1/src/v1/big_query_write_client_config.json
new file mode 100644
index 00000000..67eb3165
--- /dev/null
+++ b/owl-bot-staging/v1/src/v1/big_query_write_client_config.json
@@ -0,0 +1,59 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.storage.v1.BigQueryWrite": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ],
+        "unavailable": [
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "CreateWriteStream": {
+          "timeout_millis": 600000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        },
+        "AppendRows": {
+          "timeout_millis": 86400000,
+          "retry_codes_name": "unavailable",
+          "retry_params_name": "default"
+        },
+        "GetWriteStream": {
+          "timeout_millis": 600000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        },
+        "FinalizeWriteStream": {
+          "timeout_millis": 600000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        },
+        "BatchCommitWriteStreams": {
+          "timeout_millis": 600000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        },
+        "FlushRows": {
+          "timeout_millis": 600000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/owl-bot-staging/v1/src/v1/big_query_write_proto_list.json b/owl-bot-staging/v1/src/v1/big_query_write_proto_list.json
new file mode 100644
index 00000000..f0274ac3
--- /dev/null
+++ b/owl-bot-staging/v1/src/v1/big_query_write_proto_list.json
@@ -0,0 +1,8 @@
+[
+  "../../protos/google/cloud/bigquery/storage/v1/arrow.proto",
+  "../../protos/google/cloud/bigquery/storage/v1/avro.proto",
+  "../../protos/google/cloud/bigquery/storage/v1/protobuf.proto",
+  "../../protos/google/cloud/bigquery/storage/v1/storage.proto",
+  "../../protos/google/cloud/bigquery/storage/v1/stream.proto",
+  "../../protos/google/cloud/bigquery/storage/v1/table.proto"
+]
diff --git a/owl-bot-staging/v1/src/v1/gapic_metadata.json b/owl-bot-staging/v1/src/v1/gapic_metadata.json
new file mode 100644
index 00000000..f52c2dae
--- /dev/null
+++ b/owl-bot-staging/v1/src/v1/gapic_metadata.json
@@ -0,0 +1,117 @@
+{
+  "schema": "1.0",
+  "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
+  "language": "typescript",
+  "protoPackage": "google.cloud.bigquery.storage.v1",
+  "libraryPackage": "@google-cloud/bigquery-storage",
+  "services": {
+    "BigQueryRead": {
+      "clients": {
+        "grpc": {
+          "libraryClient": "BigQueryReadClient",
+          "rpcs": {
+            "CreateReadSession": {
+              "methods": [
+                "createReadSession"
+              ]
+            },
+            "SplitReadStream": {
+              "methods": [
+                "splitReadStream"
+              ]
+            },
+            "ReadRows": {
+              "methods": [
+                "readRows"
+              ]
+            }
+          }
+        },
+        "grpc-fallback": {
+          "libraryClient": "BigQueryReadClient",
+          "rpcs": {
+            "CreateReadSession": {
+              "methods": [
+                "createReadSession"
+              ]
+            },
+            "SplitReadStream": {
+              "methods": [
+                "splitReadStream"
+              ]
+            }
+          }
+        }
+      }
+    },
+    "BigQueryWrite": {
+      "clients": {
+        "grpc": {
+          "libraryClient":
"BigQueryWriteClient", + "rpcs": { + "CreateWriteStream": { + "methods": [ + "createWriteStream" + ] + }, + "GetWriteStream": { + "methods": [ + "getWriteStream" + ] + }, + "FinalizeWriteStream": { + "methods": [ + "finalizeWriteStream" + ] + }, + "BatchCommitWriteStreams": { + "methods": [ + "batchCommitWriteStreams" + ] + }, + "FlushRows": { + "methods": [ + "flushRows" + ] + }, + "AppendRows": { + "methods": [ + "appendRows" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "BigQueryWriteClient", + "rpcs": { + "CreateWriteStream": { + "methods": [ + "createWriteStream" + ] + }, + "GetWriteStream": { + "methods": [ + "getWriteStream" + ] + }, + "FinalizeWriteStream": { + "methods": [ + "finalizeWriteStream" + ] + }, + "BatchCommitWriteStreams": { + "methods": [ + "batchCommitWriteStreams" + ] + }, + "FlushRows": { + "methods": [ + "flushRows" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1/src/v1/index.ts b/owl-bot-staging/v1/src/v1/index.ts new file mode 100644 index 00000000..f3bacd94 --- /dev/null +++ b/owl-bot-staging/v1/src/v1/index.ts @@ -0,0 +1,20 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +export {BigQueryReadClient} from './big_query_read_client'; +export {BigQueryWriteClient} from './big_query_write_client'; diff --git a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js new file mode 100644 index 00000000..ba02bb67 --- /dev/null +++ b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js @@ -0,0 +1,28 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const storage = require('@google-cloud/bigquery-storage'); + +function main() { + const bigQueryReadClient = new storage.BigQueryReadClient(); + const bigQueryWriteClient = new storage.BigQueryWriteClient(); +} + +main(); diff --git a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts new file mode 100644 index 00000000..a0a1ed6d --- /dev/null +++ b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts @@ -0,0 +1,38 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {BigQueryReadClient, BigQueryWriteClient} from '@google-cloud/bigquery-storage'; + +// check that the client class type name can be used +function doStuffWithBigQueryReadClient(client: BigQueryReadClient) { + client.close(); +} +function doStuffWithBigQueryWriteClient(client: BigQueryWriteClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const bigQueryReadClient = new BigQueryReadClient(); + doStuffWithBigQueryReadClient(bigQueryReadClient); + // check that the client instance can be created + const bigQueryWriteClient = new BigQueryWriteClient(); + doStuffWithBigQueryWriteClient(bigQueryWriteClient); +} + +main(); diff --git a/owl-bot-staging/v1/system-test/install.ts b/owl-bot-staging/v1/system-test/install.ts new file mode 100644 index 00000000..8ec45222 --- /dev/null +++ b/owl-bot-staging/v1/system-test/install.ts @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
**
+
+import { packNTest } from 'pack-n-play';
+import { readFileSync } from 'fs';
+import { describe, it } from 'mocha';
+
+describe('📦 pack-n-play test', () => {
+
+  it('TypeScript code', async function() {
+    this.timeout(300000);
+    const options = {
+      packageDir: process.cwd(),
+      sample: {
+        description: 'TypeScript user can use the type definitions',
+        ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString()
+      }
+    };
+    await packNTest(options);
+  });
+
+  it('JavaScript code', async function() {
+    this.timeout(300000);
+    const options = {
+      packageDir: process.cwd(),
+      sample: {
+        description: 'JavaScript user can use the library',
+        ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString()
+      }
+    };
+    await packNTest(options);
+  });
+
+});
diff --git a/owl-bot-staging/v1/test/gapic_big_query_read_v1.ts b/owl-bot-staging/v1/test/gapic_big_query_read_v1.ts
new file mode 100644
index 00000000..2d7324d7
--- /dev/null
+++ b/owl-bot-staging/v1/test/gapic_big_query_read_v1.ts
@@ -0,0 +1,669 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as protos from '../protos/protos';
+import * as assert from 'assert';
+import * as sinon from 'sinon';
+import {SinonStub} from 'sinon';
+import { describe, it } from 'mocha';
+import * as bigqueryreadModule from '../src';
+
+import {PassThrough} from 'stream';
+
+import {protobuf} from 'google-gax';
+
+// Builds a fully-populated sample message by round-tripping the instance
+// through protobufjs toObject/fromObject with defaults filled in.
+function generateSampleMessage<T extends object>(instance: T) {
+  const filledObject = (instance.constructor as typeof protobuf.Message)
+    .toObject(instance as protobuf.Message, {defaults: true});
+  return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T;
+}
+
+function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) {
+  return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]);
+}
+
+function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) {
+  return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response);
+}
+
+function stubServerStreamingCall<ResponseType>(response?: ResponseType, error?: Error) {
+  const transformStub = error ?
sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // write something to the stream to trigger transformStub and send the response back to the client + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + return sinon.stub().returns(mockStream); +} + +describe('v1.BigQueryReadClient', () => { + it('has servicePath', () => { + const servicePath = bigqueryreadModule.v1.BigQueryReadClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = bigqueryreadModule.v1.BigQueryReadClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = bigqueryreadModule.v1.BigQueryReadClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.bigQueryReadStub, undefined); + await client.initialize(); + assert(client.bigQueryReadStub); + }); + + it('has close method for the initialized client', done => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.bigQueryReadStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.bigQueryReadStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + + describe('createReadSession', () => { + it('invokes createReadSession without error', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 
'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateReadSessionRequest()); + request.readSession = {}; + request.readSession.table = ''; + const expectedHeaderRequestParams = "read_session.table="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadSession()); + client.innerApiCalls.createReadSession = stubSimpleCall(expectedResponse); + const [response] = await client.createReadSession(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.createReadSession as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes createReadSession without error using callback', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateReadSessionRequest()); + request.readSession = {}; + request.readSession.table = ''; + const expectedHeaderRequestParams = "read_session.table="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadSession()); + client.innerApiCalls.createReadSession = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.createReadSession( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IReadSession|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.createReadSession as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes createReadSession with error', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateReadSessionRequest()); + request.readSession = {}; + request.readSession.table = ''; + const expectedHeaderRequestParams = "read_session.table="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.createReadSession = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.createReadSession(request), expectedError); + assert((client.innerApiCalls.createReadSession as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes createReadSession with closed client', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateReadSessionRequest()); + request.readSession = {}; + request.readSession.table = ''; + const 
expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.createReadSession(request), expectedError); + }); + }); + + describe('splitReadStream', () => { + it('invokes splitReadStream without error', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamRequest()); + request.name = ''; + const expectedHeaderRequestParams = "name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamResponse()); + client.innerApiCalls.splitReadStream = stubSimpleCall(expectedResponse); + const [response] = await client.splitReadStream(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.splitReadStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes splitReadStream without error using callback', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamRequest()); + request.name = ''; + const expectedHeaderRequestParams = "name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamResponse()); + client.innerApiCalls.splitReadStream = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.splitReadStream( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.splitReadStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes splitReadStream with error', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamRequest()); + request.name = ''; + const expectedHeaderRequestParams = "name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.splitReadStream = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.splitReadStream(request), expectedError); + assert((client.innerApiCalls.splitReadStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes splitReadStream with closed client', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 
'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamRequest()); + request.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.splitReadStream(request), expectedError); + }); + }); + + describe('readRows', () => { + it('invokes readRows without error', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadRowsRequest()); + request.readStream = ''; + const expectedHeaderRequestParams = "read_stream="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadRowsResponse()); + client.innerApiCalls.readRows = stubServerStreamingCall(expectedResponse); + const stream = client.readRows(request); + const promise = new Promise((resolve, reject) => { + stream.on('data', (response: protos.google.cloud.bigquery.storage.v1.ReadRowsResponse) => { + resolve(response); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.readRows as SinonStub) + .getCall(0).calledWith(request, expectedOptions)); + }); + + it('invokes readRows with error', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadRowsRequest()); + request.readStream = ''; + const expectedHeaderRequestParams = "read_stream="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.readRows = stubServerStreamingCall(undefined, expectedError); + const stream = client.readRows(request); + const promise = new Promise((resolve, reject) => { + stream.on('data', (response: protos.google.cloud.bigquery.storage.v1.ReadRowsResponse) => { + resolve(response); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.innerApiCalls.readRows as SinonStub) + .getCall(0).calledWith(request, expectedOptions)); + }); + + it('invokes readRows with closed client', async () => { + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadRowsRequest()); + request.readStream = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + const stream = client.readRows(request); + const promise = new Promise((resolve, reject) => { + stream.on('data', (response: protos.google.cloud.bigquery.storage.v1.ReadRowsResponse) => { + resolve(response); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + }); + }); + + describe('Path templates', () => { 
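+    // Usage sketch for the helpers exercised below (illustrative values; this
+    // assumes the generated template
+    // 'projects/{project}/locations/{location}/sessions/{session}/streams/{stream}'):
+    //   const name = client.readStreamPath('my-project', 'us', 'session-1', 'stream-0');
+    //   // => 'projects/my-project/locations/us/sessions/session-1/streams/stream-0'
+    //   client.matchSessionFromReadStreamName(name); // => 'session-1'
+    // The tests below stub render()/match(), so they verify delegation to the
+    // gax PathTemplate rather than real formatting.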
+ + describe('project', () => { + const fakePath = "/rendered/path/project"; + const expectedParameters = { + project: "projectValue", + }; + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectPath', () => { + const result = client.projectPath("projectValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectName', () => { + const result = client.matchProjectFromProjectName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('readSession', () => { + const fakePath = "/rendered/path/readSession"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + session: "sessionValue", + }; + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.readSessionPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.readSessionPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('readSessionPath', () => { + const result = client.readSessionPath("projectValue", "locationValue", "sessionValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.readSessionPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromReadSessionName', () => { + const result = client.matchProjectFromReadSessionName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromReadSessionName', () => { + const result = client.matchLocationFromReadSessionName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchSessionFromReadSessionName', () => { + const result = client.matchSessionFromReadSessionName(fakePath); + assert.strictEqual(result, "sessionValue"); + assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('readStream', () => { + const fakePath = "/rendered/path/readStream"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + session: "sessionValue", + stream: "streamValue", + }; + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.readStreamPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.readStreamPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('readStreamPath', () => { + const result = client.readStreamPath("projectValue", "locationValue", "sessionValue", "streamValue"); + assert.strictEqual(result, fakePath); + 
assert((client.pathTemplates.readStreamPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromReadStreamName', () => { + const result = client.matchProjectFromReadStreamName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromReadStreamName', () => { + const result = client.matchLocationFromReadStreamName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchSessionFromReadStreamName', () => { + const result = client.matchSessionFromReadStreamName(fakePath); + assert.strictEqual(result, "sessionValue"); + assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchStreamFromReadStreamName', () => { + const result = client.matchStreamFromReadStreamName(fakePath); + assert.strictEqual(result, "streamValue"); + assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('table', () => { + const fakePath = "/rendered/path/table"; + const expectedParameters = { + project: "projectValue", + dataset: "datasetValue", + table: "tableValue", + }; + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.tablePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.tablePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('tablePath', () => { + const result = client.tablePath("projectValue", "datasetValue", "tableValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.tablePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromTableName', () => { + const result = client.matchProjectFromTableName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.tablePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchDatasetFromTableName', () => { + const result = client.matchDatasetFromTableName(fakePath); + assert.strictEqual(result, "datasetValue"); + assert((client.pathTemplates.tablePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchTableFromTableName', () => { + const result = client.matchTableFromTableName(fakePath); + assert.strictEqual(result, "tableValue"); + assert((client.pathTemplates.tablePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('writeStream', () => { + const fakePath = "/rendered/path/writeStream"; + const expectedParameters = { + project: "projectValue", + dataset: "datasetValue", + table: "tableValue", + stream: "streamValue", + }; + const client = new bigqueryreadModule.v1.BigQueryReadClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.writeStreamPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.writeStreamPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('writeStreamPath', () => { + const result = client.writeStreamPath("projectValue", "datasetValue", 
"tableValue", "streamValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.writeStreamPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromWriteStreamName', () => { + const result = client.matchProjectFromWriteStreamName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchDatasetFromWriteStreamName', () => { + const result = client.matchDatasetFromWriteStreamName(fakePath); + assert.strictEqual(result, "datasetValue"); + assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchTableFromWriteStreamName', () => { + const result = client.matchTableFromWriteStreamName(fakePath); + assert.strictEqual(result, "tableValue"); + assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchStreamFromWriteStreamName', () => { + const result = client.matchStreamFromWriteStreamName(fakePath); + assert.strictEqual(result, "streamValue"); + assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/v1/test/gapic_big_query_write_v1.ts b/owl-bot-staging/v1/test/gapic_big_query_write_v1.ts new file mode 100644 index 00000000..a5a7d4ee --- /dev/null +++ b/owl-bot-staging/v1/test/gapic_big_query_write_v1.ts @@ -0,0 +1,921 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import { describe, it } from 'mocha'; +import * as bigquerywriteModule from '../src'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubBidiStreamingCall(response?: ResponseType, error?: Error) { + const transformStub = error ? 
sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + return sinon.stub().returns(mockStream); +} + +describe('v1.BigQueryWriteClient', () => { + it('has servicePath', () => { + const servicePath = bigquerywriteModule.v1.BigQueryWriteClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = bigquerywriteModule.v1.BigQueryWriteClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = bigquerywriteModule.v1.BigQueryWriteClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.bigQueryWriteStub, undefined); + await client.initialize(); + assert(client.bigQueryWriteStub); + }); + + it('has close method for the initialized client', done => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.bigQueryWriteStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.bigQueryWriteStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + + describe('createWriteStream', () => { + it('invokes createWriteStream without error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new 
protos.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest()); + request.parent = ''; + const expectedHeaderRequestParams = "parent="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.WriteStream()); + client.innerApiCalls.createWriteStream = stubSimpleCall(expectedResponse); + const [response] = await client.createWriteStream(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.createWriteStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes createWriteStream without error using callback', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest()); + request.parent = ''; + const expectedHeaderRequestParams = "parent="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.WriteStream()); + client.innerApiCalls.createWriteStream = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.createWriteStream( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IWriteStream|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.createWriteStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes createWriteStream with error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest()); + request.parent = ''; + const expectedHeaderRequestParams = "parent="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.createWriteStream = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.createWriteStream(request), expectedError); + assert((client.innerApiCalls.createWriteStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes createWriteStream with closed client', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest()); + request.parent = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.createWriteStream(request), expectedError); + }); + }); + + describe('getWriteStream', () => { + it('invokes getWriteStream without error', async () => { + const client = new 
bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.GetWriteStreamRequest()); + request.name = ''; + const expectedHeaderRequestParams = "name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.WriteStream()); + client.innerApiCalls.getWriteStream = stubSimpleCall(expectedResponse); + const [response] = await client.getWriteStream(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.getWriteStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes getWriteStream without error using callback', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.GetWriteStreamRequest()); + request.name = ''; + const expectedHeaderRequestParams = "name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.WriteStream()); + client.innerApiCalls.getWriteStream = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getWriteStream( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IWriteStream|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.getWriteStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes getWriteStream with error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.GetWriteStreamRequest()); + request.name = ''; + const expectedHeaderRequestParams = "name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.getWriteStream = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getWriteStream(request), expectedError); + assert((client.innerApiCalls.getWriteStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes getWriteStream with closed client', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.GetWriteStreamRequest()); + request.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getWriteStream(request), expectedError); + }); 
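+
+    // The expectedOptions in the cases above assert the dynamic-routing header
+    // 'x-goog-request-params'; for GetWriteStream it is derived from the
+    // request's 'name' field (empty in these fixtures, hence "name="). In a
+    // real call (the resource name below is hypothetical), e.g.
+    //   client.getWriteStream({name: 'projects/p/datasets/d/tables/t/streams/s'})
+    // the header carries the url-encoded name so the backend can route the RPC.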
+ }); + + describe('finalizeWriteStream', () => { + it('invokes finalizeWriteStream without error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest()); + request.name = ''; + const expectedHeaderRequestParams = "name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse()); + client.innerApiCalls.finalizeWriteStream = stubSimpleCall(expectedResponse); + const [response] = await client.finalizeWriteStream(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.finalizeWriteStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes finalizeWriteStream without error using callback', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest()); + request.name = ''; + const expectedHeaderRequestParams = "name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse()); + client.innerApiCalls.finalizeWriteStream = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.finalizeWriteStream( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.finalizeWriteStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes finalizeWriteStream with error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest()); + request.name = ''; + const expectedHeaderRequestParams = "name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.finalizeWriteStream = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.finalizeWriteStream(request), expectedError); + assert((client.innerApiCalls.finalizeWriteStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes finalizeWriteStream with closed client', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = 
generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest()); + request.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.finalizeWriteStream(request), expectedError); + }); + }); + + describe('batchCommitWriteStreams', () => { + it('invokes batchCommitWriteStreams without error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest()); + request.parent = ''; + const expectedHeaderRequestParams = "parent="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse()); + client.innerApiCalls.batchCommitWriteStreams = stubSimpleCall(expectedResponse); + const [response] = await client.batchCommitWriteStreams(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.batchCommitWriteStreams as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes batchCommitWriteStreams without error using callback', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest()); + request.parent = ''; + const expectedHeaderRequestParams = "parent="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse()); + client.innerApiCalls.batchCommitWriteStreams = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.batchCommitWriteStreams( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.batchCommitWriteStreams as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes batchCommitWriteStreams with error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest()); + request.parent = ''; + const expectedHeaderRequestParams = "parent="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.batchCommitWriteStreams = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.batchCommitWriteStreams(request), expectedError); + 
assert((client.innerApiCalls.batchCommitWriteStreams as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes batchCommitWriteStreams with closed client', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest()); + request.parent = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.batchCommitWriteStreams(request), expectedError); + }); + }); + + describe('flushRows', () => { + it('invokes flushRows without error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsRequest()); + request.writeStream = ''; + const expectedHeaderRequestParams = "write_stream="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsResponse()); + client.innerApiCalls.flushRows = stubSimpleCall(expectedResponse); + const [response] = await client.flushRows(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.flushRows as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes flushRows without error using callback', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsRequest()); + request.writeStream = ''; + const expectedHeaderRequestParams = "write_stream="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsResponse()); + client.innerApiCalls.flushRows = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.flushRows( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.flushRows as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes flushRows with error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsRequest()); + request.writeStream = ''; + const expectedHeaderRequestParams = "write_stream="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new 
Error('expected'); + client.innerApiCalls.flushRows = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.flushRows(request), expectedError); + assert((client.innerApiCalls.flushRows as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes flushRows with closed client', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsRequest()); + request.writeStream = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.flushRows(request), expectedError); + }); + }); + + describe('appendRows', () => { + it('invokes appendRows without error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.AppendRowsRequest()); + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.AppendRowsResponse()); + client.innerApiCalls.appendRows = stubBidiStreamingCall(expectedResponse); + const stream = client.appendRows(); + const promise = new Promise((resolve, reject) => { + stream.on('data', (response: protos.google.cloud.bigquery.storage.v1.AppendRowsResponse) => { + resolve(response); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + stream.write(request); + stream.end(); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.appendRows as SinonStub) + .getCall(0).calledWith(null)); + assert.deepStrictEqual(((stream as unknown as PassThrough) + ._transform as SinonStub).getCall(0).args[0], request); + }); + + it('invokes appendRows with error', async () => { + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.AppendRowsRequest()); + const expectedError = new Error('expected'); + client.innerApiCalls.appendRows = stubBidiStreamingCall(undefined, expectedError); + const stream = client.appendRows(); + const promise = new Promise((resolve, reject) => { + stream.on('data', (response: protos.google.cloud.bigquery.storage.v1.AppendRowsResponse) => { + resolve(response); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + stream.write(request); + stream.end(); + }); + await assert.rejects(promise, expectedError); + assert((client.innerApiCalls.appendRows as SinonStub) + .getCall(0).calledWith(null)); + assert.deepStrictEqual(((stream as unknown as PassThrough) + ._transform as SinonStub).getCall(0).args[0], request); + }); + }); + + describe('Path templates', () => { + + describe('project', () => { + const fakePath = "/rendered/path/project"; + const expectedParameters = { + project: "projectValue", + }; + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectPathTemplate.render = + sinon.stub().returns(fakePath); + 
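The `appendRows` suites above drive the generated bidi-streaming surface through a stubbed `PassThrough`; in real use the method returns a duplex gRPC stream. A hedged sketch follows (the write-stream name is a placeholder, and a production writer must also send a writer schema on the first request):

```ts
// Hedged usage sketch, not part of the generated diff: appendRows() opens a
// bidi stream; requests are written to it and AppendRowsResponses arrive as
// 'data' events. Schema setup and row serialization are elided.
import {v1} from '@google-cloud/bigquery-storage';

function appendOnce(serializedRows: Uint8Array[]): Promise<unknown> {
  const client = new v1.BigQueryWriteClient();
  const stream = client.appendRows();
  return new Promise((resolve, reject) => {
    stream.on('data', resolve); // first AppendRowsResponse
    stream.on('error', reject);
    stream.write({
      writeStream:
        'projects/my-project/datasets/my_dataset/tables/my_table/streams/_default',
      protoRows: {rows: {serializedRows}},
    });
    stream.end();
  });
}
```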
client.pathTemplates.projectPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectPath', () => { + const result = client.projectPath("projectValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectName', () => { + const result = client.matchProjectFromProjectName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('readSession', () => { + const fakePath = "/rendered/path/readSession"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + session: "sessionValue", + }; + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.readSessionPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.readSessionPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('readSessionPath', () => { + const result = client.readSessionPath("projectValue", "locationValue", "sessionValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.readSessionPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromReadSessionName', () => { + const result = client.matchProjectFromReadSessionName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromReadSessionName', () => { + const result = client.matchLocationFromReadSessionName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchSessionFromReadSessionName', () => { + const result = client.matchSessionFromReadSessionName(fakePath); + assert.strictEqual(result, "sessionValue"); + assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('readStream', () => { + const fakePath = "/rendered/path/readStream"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + session: "sessionValue", + stream: "streamValue", + }; + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.readStreamPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.readStreamPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('readStreamPath', () => { + const result = client.readStreamPath("projectValue", "locationValue", "sessionValue", "streamValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.readStreamPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromReadStreamName', () => { + const result = client.matchProjectFromReadStreamName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + 
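These path-template suites stub `render` and `match`; un-stubbed, the generated helpers are inverses of each other. A hedged sketch with placeholder values:

```ts
// Hedged usage sketch, not part of the generated diff: composing and parsing
// a read-stream resource name with the helpers tested above.
import {v1} from '@google-cloud/bigquery-storage';

const client = new v1.BigQueryWriteClient();
const name = client.readStreamPath('my-project', 'us', 'session-1', 'stream-1');
// 'projects/my-project/locations/us/sessions/session-1/streams/stream-1'
console.log(client.matchSessionFromReadStreamName(name)); // 'session-1'
console.log(client.matchStreamFromReadStreamName(name));  // 'stream-1'
```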
it('matchLocationFromReadStreamName', () => { + const result = client.matchLocationFromReadStreamName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchSessionFromReadStreamName', () => { + const result = client.matchSessionFromReadStreamName(fakePath); + assert.strictEqual(result, "sessionValue"); + assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchStreamFromReadStreamName', () => { + const result = client.matchStreamFromReadStreamName(fakePath); + assert.strictEqual(result, "streamValue"); + assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('table', () => { + const fakePath = "/rendered/path/table"; + const expectedParameters = { + project: "projectValue", + dataset: "datasetValue", + table: "tableValue", + }; + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.tablePathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.tablePathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('tablePath', () => { + const result = client.tablePath("projectValue", "datasetValue", "tableValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.tablePathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromTableName', () => { + const result = client.matchProjectFromTableName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.tablePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchDatasetFromTableName', () => { + const result = client.matchDatasetFromTableName(fakePath); + assert.strictEqual(result, "datasetValue"); + assert((client.pathTemplates.tablePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchTableFromTableName', () => { + const result = client.matchTableFromTableName(fakePath); + assert.strictEqual(result, "tableValue"); + assert((client.pathTemplates.tablePathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('writeStream', () => { + const fakePath = "/rendered/path/writeStream"; + const expectedParameters = { + project: "projectValue", + dataset: "datasetValue", + table: "tableValue", + stream: "streamValue", + }; + const client = new bigquerywriteModule.v1.BigQueryWriteClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.writeStreamPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.writeStreamPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('writeStreamPath', () => { + const result = client.writeStreamPath("projectValue", "datasetValue", "tableValue", "streamValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.writeStreamPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromWriteStreamName', () => { + const result = client.matchProjectFromWriteStreamName(fakePath); + assert.strictEqual(result, "projectValue"); + 
assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchDatasetFromWriteStreamName', () => { + const result = client.matchDatasetFromWriteStreamName(fakePath); + assert.strictEqual(result, "datasetValue"); + assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchTableFromWriteStreamName', () => { + const result = client.matchTableFromWriteStreamName(fakePath); + assert.strictEqual(result, "tableValue"); + assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchStreamFromWriteStreamName', () => { + const result = client.matchStreamFromWriteStreamName(fakePath); + assert.strictEqual(result, "streamValue"); + assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/v1/tsconfig.json b/owl-bot-staging/v1/tsconfig.json new file mode 100644 index 00000000..c78f1c88 --- /dev/null +++ b/owl-bot-staging/v1/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/owl-bot-staging/v1/webpack.config.js b/owl-bot-staging/v1/webpack.config.js new file mode 100644 index 00000000..bad73cc0 --- /dev/null +++ b/owl-bot-staging/v1/webpack.config.js @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'BigQueryRead', + filename: './big-query-read.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; diff --git a/owl-bot-staging/v1beta1/.eslintignore b/owl-bot-staging/v1beta1/.eslintignore new file mode 100644 index 00000000..cfc348ec --- /dev/null +++ b/owl-bot-staging/v1beta1/.eslintignore @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/owl-bot-staging/v1beta1/.eslintrc.json b/owl-bot-staging/v1beta1/.eslintrc.json new file mode 100644 index 00000000..78215349 --- /dev/null +++ b/owl-bot-staging/v1beta1/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/owl-bot-staging/v1beta1/.gitignore b/owl-bot-staging/v1beta1/.gitignore new file mode 100644 index 00000000..5d32b237 --- /dev/null +++ b/owl-bot-staging/v1beta1/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/owl-bot-staging/v1beta1/.jsdoc.js b/owl-bot-staging/v1beta1/.jsdoc.js new file mode 100644 index 00000000..21870f2a --- /dev/null +++ b/owl-bot-staging/v1beta1/.jsdoc.js @@ -0,0 +1,55 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2022 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/bigquery-storage', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/owl-bot-staging/v1beta1/.mocharc.js b/owl-bot-staging/v1beta1/.mocharc.js new file mode 100644 index 00000000..481c522b --- /dev/null +++ b/owl-bot-staging/v1beta1/.mocharc.js @@ -0,0 +1,33 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/owl-bot-staging/v1beta1/.prettierrc.js b/owl-bot-staging/v1beta1/.prettierrc.js new file mode 100644 index 00000000..494e1478 --- /dev/null +++ b/owl-bot-staging/v1beta1/.prettierrc.js @@ -0,0 +1,22 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
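The generated `.mocharc.js` above derives its final config from environment variables. A hedged check of that behavior (paths and values illustrative):

```ts
// Hedged sketch, not part of the generated diff: observing the env-var
// overrides implemented by the .mocharc.js shown above.
process.env.MOCHA_THROW_DEPRECATION = 'false';
process.env.MOCHA_REPORTER = 'dot';
// eslint-disable-next-line @typescript-eslint/no-var-requires
const mochaConfig = require('./.mocharc.js');
console.log(mochaConfig['throw-deprecation']); // undefined: deleted by override
console.log(mochaConfig.reporter);             // 'dot'
```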
** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/owl-bot-staging/v1beta1/README.md b/owl-bot-staging/v1beta1/README.md new file mode 100644 index 00000000..f5dcfbaf --- /dev/null +++ b/owl-bot-staging/v1beta1/README.md @@ -0,0 +1 @@ +Storage: Nodejs Client diff --git a/owl-bot-staging/v1beta1/linkinator.config.json b/owl-bot-staging/v1beta1/linkinator.config.json new file mode 100644 index 00000000..befd23c8 --- /dev/null +++ b/owl-bot-staging/v1beta1/linkinator.config.json @@ -0,0 +1,16 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" + ], + "silent": true, + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 +} diff --git a/owl-bot-staging/v1beta1/package.json b/owl-bot-staging/v1beta1/package.json new file mode 100644 index 00000000..763726e4 --- /dev/null +++ b/owl-bot-staging/v1beta1/package.json @@ -0,0 +1,64 @@ +{ + "name": "@google-cloud/bigquery-storage", + "version": "0.1.0", + "description": "Storage client for Node.js", + "repository": "googleapis/nodejs-storage", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google storage", + "storage", + "big query storage" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . && cp -r protos build/", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "predocs-test": "npm run docs", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^2.29.4" + }, + "devDependencies": { + "@types/mocha": "^9.1.0", + "@types/node": "^16.0.0", + "@types/sinon": "^10.0.8", + "c8": "^7.11.0", + "gts": "^3.1.0", + "jsdoc": "^3.6.7", + "jsdoc-fresh": "^1.1.1", + "jsdoc-region-tag": "^1.3.1", + "linkinator": "^3.0.0", + "mocha": "^9.1.4", + "null-loader": "^4.0.1", + "pack-n-play": "^1.0.0-2", + "sinon": "^13.0.0", + "ts-loader": "^9.2.6", + "typescript": "^4.5.5", + "webpack": "^5.67.0", + "webpack-cli": "^4.9.1" + }, + "engines": { + "node": ">=v10.24.0" + } +} diff --git a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto new file mode 100644 index 00000000..f70c61c7 --- /dev/null +++ b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto @@ -0,0 +1,36 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_outer_classname = "ArrowProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// Arrow schema. +message ArrowSchema { + // IPC serialized Arrow schema. + bytes serialized_schema = 1; +} + +// Arrow RecordBatch. +message ArrowRecordBatch { + // IPC serialized Arrow RecordBatch. + bytes serialized_record_batch = 1; + + // The count of rows in the returning block. + int64 row_count = 2; +} diff --git a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/avro.proto b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/avro.proto new file mode 100644 index 00000000..7d034a28 --- /dev/null +++ b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/avro.proto @@ -0,0 +1,37 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_outer_classname = "AvroProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// Avro schema. +message AvroSchema { + // Json serialized schema, as described at + // https://avro.apache.org/docs/1.8.1/spec.html + string schema = 1; +} + +// Avro rows. +message AvroRows { + // Binary serialized rows in a block. + bytes serialized_binary_rows = 1; + + // The count of rows in the returning block. + int64 row_count = 2; +} diff --git a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto new file mode 100644 index 00000000..1ff8d8b5 --- /dev/null +++ b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto @@ -0,0 +1,39 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// Options dictating how we read a table. +message TableReadOptions { + // Optional. Names of the fields in the table that should be read. If empty, + // all fields will be read. 
If the specified field is a nested field, all the + // sub-fields in the field will be selected. The output field order is + // unrelated to the order of fields in selected_fields. + repeated string selected_fields = 1; + + // Optional. SQL text filtering statement, similar to a WHERE clause in + // a query. Aggregates are not supported. + // + // Examples: "int_field > 5" + // "date_field = CAST('2014-9-27' as DATE)" + // "nullable_field is not NULL" + // "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" + // "numeric_field BETWEEN 1.0 AND 5.0" + string row_restriction = 2; +} diff --git a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/storage.proto b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/storage.proto new file mode 100644 index 00000000..0d311418 --- /dev/null +++ b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/storage.proto @@ -0,0 +1,405 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/storage/v1beta1/arrow.proto"; +import "google/cloud/bigquery/storage/v1beta1/avro.proto"; +import "google/cloud/bigquery/storage/v1beta1/read_options.proto"; +import "google/cloud/bigquery/storage/v1beta1/table_reference.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// BigQuery storage API. +// +// The BigQuery storage API can be used to read data stored in BigQuery. +service BigQueryStorage { + option (google.api.default_host) = "bigquerystorage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a new read session. A read session divides the contents of a + // BigQuery table into one or more streams, which can then be used to read + // data from the table. The read session also specifies properties of the + // data to be read, such as a list of columns or a push-down filter describing + // the rows to be returned. + // + // A particular row can be read by at most one stream. When the caller has + // reached the end of each stream in the session, then all the data in the + // table has been read. + // + // Read sessions automatically expire 24 hours after they are created and do + // not require manual clean-up by the caller. 
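Combining the `TableReadOptions` defined above with the `CreateReadSession` call described here, a hedged request sketch (identifiers are placeholders; camelCase field names follow the generated client):

```ts
// Hedged usage sketch, not part of the generated diff: a v1beta1
// CreateReadSession request with column selection and a row restriction.
import {v1beta1} from '@google-cloud/bigquery-storage';

async function createFilteredSession(): Promise<void> {
  const client = new v1beta1.BigQueryStorageClient();
  const [session] = await client.createReadSession({
    tableReference: {
      projectId: 'my-project',
      datasetId: 'my_dataset',
      tableId: 'my_table',
    },
    parent: 'projects/my-project',
    readOptions: {
      selectedFields: ['int_field', 'date_field'],
      rowRestriction: 'int_field > 5',
    },
  });
  console.log(session.name);
}
```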
+ rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) { + option (google.api.http) = { + post: "/v1beta1/{table_reference.project_id=projects/*}" + body: "*" + additional_bindings { + post: "/v1beta1/{table_reference.dataset_id=projects/*/datasets/*}" + body: "*" + } + }; + option (google.api.method_signature) = "table_reference,parent,requested_streams"; + } + + // Reads rows from the table in the format prescribed by the read session. + // Each response contains one or more table rows, up to a maximum of 10 MiB + // per response; read requests which attempt to read individual rows larger + // than this will fail. + // + // Each request also returns a set of stream statistics reflecting the + // estimated total number of rows in the read stream. This number is computed + // based on the total table size and the number of active streams in the read + // session, and may change as other streams continue to read data. + rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { + option (google.api.http) = { + get: "/v1beta1/{read_position.stream.name=projects/*/streams/*}" + }; + option (google.api.method_signature) = "read_position"; + } + + // Creates additional streams for a ReadSession. This API can be used to + // dynamically adjust the parallelism of a batch processing task upwards by + // adding additional workers. + rpc BatchCreateReadSessionStreams(BatchCreateReadSessionStreamsRequest) returns (BatchCreateReadSessionStreamsResponse) { + option (google.api.http) = { + post: "/v1beta1/{session.name=projects/*/sessions/*}" + body: "*" + }; + option (google.api.method_signature) = "session,requested_streams"; + } + + // Triggers the graceful termination of a single stream in a ReadSession. This + // API can be used to dynamically adjust the parallelism of a batch processing + // task downwards without losing data. + // + // This API does not delete the stream -- it remains visible in the + // ReadSession, and any data processed by the stream is not released to other + // streams. However, no additional data will be assigned to the stream once + // this call completes. Callers must continue reading data on the stream until + // the end of the stream is reached so that data which has already been + // assigned to the stream will be processed. + // + // This method will return an error if there are no other live streams + // in the Session, or if SplitReadStream() has been called on the given + // Stream. + rpc FinalizeStream(FinalizeStreamRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1beta1/{stream.name=projects/*/streams/*}" + body: "*" + }; + option (google.api.method_signature) = "stream"; + } + + // Splits a given read stream into two Streams. These streams are referred to + // as the primary and the residual of the split. The original stream can still + // be read from in the same manner as before. Both of the returned streams can + // also be read from, and the total rows returned by both child streams will be + // the same as the rows read from the original stream. + // + // Moreover, the two child streams will be allocated back to back in the + // original Stream. Concretely, it is guaranteed that for streams Original, + // Primary, and Residual, that Original[0-j] = Primary[0-j] and + // Original[j-n] = Residual[0-m] once the streams have been read to + // completion. + // + // This method is guaranteed to be idempotent.
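Before the rpc definition that follows, a hedged illustration of these split semantics (the stream name is a placeholder; per `SplitReadStreamResponse` further down, an unset primary stream means no further splits are possible):

```ts
// Hedged usage sketch, not part of the generated diff: splitting a v1beta1
// read stream at roughly its midpoint.
import {v1beta1} from '@google-cloud/bigquery-storage';

async function splitAtMidpoint(streamName: string) {
  const client = new v1beta1.BigQueryStorageClient();
  const [response] = await client.splitReadStream({
    originalStream: {name: streamName},
    fraction: 0.5, // evaluated against pre-filtered rows, see above
  });
  return [response.primaryStream, response.remainderStream];
}
```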
+ rpc SplitReadStream(SplitReadStreamRequest) returns (SplitReadStreamResponse) { + option (google.api.http) = { + get: "/v1beta1/{original_stream.name=projects/*/streams/*}" + }; + option (google.api.method_signature) = "original_stream"; + } +} + +// Information about a single data stream within a read session. +message Stream { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/Stream" + pattern: "projects/{project}/locations/{location}/streams/{stream}" + }; + + // Name of the stream, in the form + // `projects/{project_id}/locations/{location}/streams/{stream_id}`. + string name = 1; +} + +// Expresses a point within a given stream using an offset position. +message StreamPosition { + // Identifier for a given Stream. + Stream stream = 1; + + // Position in the stream. + int64 offset = 2; +} + +// Information returned from a `CreateReadSession` request. +message ReadSession { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/ReadSession" + pattern: "projects/{project}/locations/{location}/sessions/{session}" + }; + + // Unique identifier for the session, in the form + // `projects/{project_id}/locations/{location}/sessions/{session_id}`. + string name = 1; + + // Time at which the session becomes invalid. After this time, subsequent + // requests to read this Session will return errors. + google.protobuf.Timestamp expire_time = 2; + + // The schema for the read. If read_options.selected_fields is set, the + // schema may be different from the table schema as it will only contain + // the selected fields. + oneof schema { + // Avro schema. + AvroSchema avro_schema = 5; + + // Arrow schema. + ArrowSchema arrow_schema = 6; + } + + // Streams associated with this session. + repeated Stream streams = 4; + + // Table that this ReadSession is reading from. + TableReference table_reference = 7; + + // Any modifiers which are applied when reading from the specified table. + TableModifiers table_modifiers = 8; + + // The strategy to use for distributing data among the streams. + ShardingStrategy sharding_strategy = 9; +} + +// Creates a new read session, which may include additional options such as +// requested parallelism, projection filters and constraints. +message CreateReadSessionRequest { + // Required. Reference to the table to read. + TableReference table_reference = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. String of the form `projects/{project_id}` indicating the + // project this ReadSession is associated with. This is the project that will + // be billed for usage. + string parent = 6 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Any modifiers to the Table (e.g. snapshot timestamp). + TableModifiers table_modifiers = 2; + + // Initial number of streams. If unset or 0, we will + // provide a value of streams so as to produce reasonable throughput. Must be + // non-negative. The number of streams may be lower than the requested number, + // depending on the amount of parallelism that is reasonable for the table and + // the maximum amount of parallelism allowed by the system. + // + // Streams must be read starting from offset 0. + int32 requested_streams = 3; + + // Read options for this session (e.g. column selection, filters). + TableReadOptions read_options = 4; + + // Data output format. Currently defaults to Avro. + DataFormat format = 5; + + // The strategy to use for distributing data among multiple streams.
Currently + // defaults to liquid sharding. + ShardingStrategy sharding_strategy = 7; +} + +// Data format for input or output data. +enum DataFormat { + // Data format is unspecified. + DATA_FORMAT_UNSPECIFIED = 0; + + // Avro is a standard open source row based file format. + // See https://avro.apache.org/ for more details. + AVRO = 1; + + ARROW = 3; +} + +// Strategy for distributing data among multiple streams in a read session. +enum ShardingStrategy { + // Same as LIQUID. + SHARDING_STRATEGY_UNSPECIFIED = 0; + + // Assigns data to each stream based on the client's read rate. The faster the + // client reads from a stream, the more data is assigned to the stream. In + // this strategy, it's possible to read all data from a single stream even if + // there are other streams present. + LIQUID = 1; + + // Assigns data to each stream such that roughly the same number of rows can + // be read from each stream. Because the server-side unit for assigning data + // is collections of rows, the API does not guarantee that each stream will + // return the same number of rows. Additionally, the limits are enforced based + // on the number of pre-filtering rows, so some filters can lead to lopsided + // assignments. + BALANCED = 2; +} + +// Requests to read row data via `ReadRows` must provide Stream position information. +message ReadRowsRequest { + // Required. Identifier of the position in the stream to start reading from. + // The offset requested must be less than the last row read from ReadRows. + // Requesting a larger offset is undefined. + StreamPosition read_position = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Progress information for a given Stream. +message StreamStatus { + // Number of estimated rows in the current stream. May change over time as + // different readers in the stream progress at rates which are relatively fast + // or slow. + int64 estimated_row_count = 1; + + // A value in the range [0.0, 1.0] that represents the fraction of rows + // assigned to this stream that have been processed by the server. In the + // presence of read filters, the server may process more rows than it returns, + // so this value reflects progress through the pre-filtering rows. + // + // This value is only populated for sessions created through the BALANCED + // sharding strategy. + float fraction_consumed = 2; + + // Represents the progress of the current stream. + Progress progress = 4; + + // Whether this stream can be split. For sessions that use the LIQUID sharding + // strategy, this value is always false. For BALANCED sessions, this value is + // false when enough data has been read such that no more splits are possible + // at that point or beyond. For small tables or streams that are the result of + // a chain of splits, this value may never be true. + bool is_splittable = 3; +} + +message Progress { + // The fraction of rows assigned to the stream that have been processed by the + // server so far, not including the rows in the current response message. + // + // This value, along with `at_response_end`, can be used to interpolate the + // progress made as the rows in the message are being processed using the + // following formula: `at_response_start + (at_response_end - + // at_response_start) * rows_processed_from_response / rows_in_response`. + // + // Note that if a filter is provided, the `at_response_end` value of the + // previous response may not necessarily be equal to the `at_response_start` + // value of the current response.
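A worked version of the interpolation formula just documented (the helper name and sample numbers are illustrative, not part of the API):

```ts
// Hedged sketch, not part of the generated diff: the Progress interpolation
// formula from the comment above, as a standalone helper.
function interpolateProgress(
  atResponseStart: number, // Progress.at_response_start
  atResponseEnd: number,   // Progress.at_response_end
  rowsProcessedFromResponse: number,
  rowsInResponse: number
): number {
  return (
    atResponseStart +
    ((atResponseEnd - atResponseStart) * rowsProcessedFromResponse) /
      rowsInResponse
  );
}

// A response spanning 40%..50% of the stream, with 25 of its 100 rows
// consumed, puts overall progress at 0.425.
console.log(interpolateProgress(0.4, 0.5, 25, 100)); // 0.425
```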
+ float at_response_start = 1; + + // Similar to `at_response_start`, except that this value includes the rows in + // the current response. + float at_response_end = 2; +} + +// Information on whether the current connection is being throttled. +message ThrottleStatus { + // How much this connection is being throttled. + // 0 is no throttling, 100 is completely throttled. + int32 throttle_percent = 1; +} + +// Response from calling `ReadRows` may include row data, progress and +// throttling information. +message ReadRowsResponse { + // Row data is returned in format specified during session creation. + oneof rows { + // Serialized row data in AVRO format. + AvroRows avro_rows = 3; + + // Serialized row data in Arrow RecordBatch format. + ArrowRecordBatch arrow_record_batch = 4; + } + + // Number of serialized rows in the rows block. This value is recorded here, + // in addition to the row_count values in the output-specific messages in + // `rows`, so that code which needs to record progress through the stream can + // do so in an output format-independent way. + int64 row_count = 6; + + // Estimated stream statistics. + StreamStatus status = 2; + + // Throttling status. If unset, the latest response still describes + // the current throttling status. + ThrottleStatus throttle_status = 5; +} + +// Information needed to request additional streams for an established read +// session. +message BatchCreateReadSessionStreamsRequest { + // Required. Must be a non-expired session obtained from a call to + // CreateReadSession. Only the name field needs to be set. + ReadSession session = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Number of new streams requested. Must be positive. + // Number of added streams may be less than this, see CreateReadSessionRequest + // for more information. + int32 requested_streams = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// The response from `BatchCreateReadSessionStreams` returns the stream +// identifiers for the newly created streams. +message BatchCreateReadSessionStreamsResponse { + // Newly added streams. + repeated Stream streams = 1; +} + +// Request information for invoking `FinalizeStream`. +message FinalizeStreamRequest { + // Required. Stream to finalize. + Stream stream = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Request information for `SplitReadStream`. +message SplitReadStreamRequest { + // Required. Stream to split. + Stream original_stream = 1 [(google.api.field_behavior) = REQUIRED]; + + // A value in the range (0.0, 1.0) that specifies the fractional point at + // which the original stream should be split. The actual split point is + // evaluated on pre-filtered rows, so if a filter is provided, then there is + // no guarantee that the division of the rows between the new child streams + // will be proportional to this fractional value. Additionally, because the + // server-side unit for assigning data is collections of rows, this fraction + // will always map to a data storage boundary on the server side. + float fraction = 2; +} + +// Response from `SplitReadStream`. +message SplitReadStreamResponse { + // Primary stream, which contains the beginning portion of + // |original_stream|. An empty value indicates that the original stream can no + // longer be split. + Stream primary_stream = 1; + + // Remainder stream, which contains the tail of |original_stream|. An empty + // value indicates that the original stream can no longer be split.
+ Stream remainder_stream = 2; +} diff --git a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto new file mode 100644 index 00000000..4269392f --- /dev/null +++ b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto @@ -0,0 +1,42 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta1; + +import "google/api/resource.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; +option java_outer_classname = "TableReferenceProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta1"; + +// Table reference that includes just the 3 strings needed to identify a table. +message TableReference { + // The assigned project ID of the project. + string project_id = 1; + + // The ID of the dataset in the above project. + string dataset_id = 2; + + // The ID of the table in the above dataset. + string table_id = 3; +} + +// All fields in this message optional. +message TableModifiers { + // The snapshot time of the table. If not set, interpreted as now. + google.protobuf.Timestamp snapshot_time = 1; +} diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js new file mode 100644 index 00000000..3d7b6d3e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js @@ -0,0 +1,66 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(session, requestedStreams) { + // [START bigquerystorage_v1beta1_generated_BigQueryStorage_BatchCreateReadSessionStreams_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Must be a non-expired session obtained from a call to + * CreateReadSession. Only the name field needs to be set. + */ + // const session = {} + /** + * Required. 
Number of new streams requested. Must be positive. + * Number of added streams may be less than this, see CreateReadSessionRequest + * for more information. + */ + // const requestedStreams = 1234 + + // Imports the Storage library + const {BigQueryStorageClient} = require('@google-cloud/bigquery-storage').v1beta1; + + // Instantiates a client + const storageClient = new BigQueryStorageClient(); + + async function callBatchCreateReadSessionStreams() { + // Construct request + const request = { + session, + requestedStreams, + }; + + // Run request + const response = await storageClient.batchCreateReadSessionStreams(request); + console.log(response); + } + + callBatchCreateReadSessionStreams(); + // [END bigquerystorage_v1beta1_generated_BigQueryStorage_BatchCreateReadSessionStreams_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.create_read_session.js b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.create_read_session.js new file mode 100644 index 00000000..c71a464c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.create_read_session.js @@ -0,0 +1,91 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(tableReference, parent) { + // [START bigquerystorage_v1beta1_generated_BigQueryStorage_CreateReadSession_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Reference to the table to read. + */ + // const tableReference = {} + /** + * Required. String of the form `projects/{project_id}` indicating the + * project this ReadSession is associated with. This is the project that will + * be billed for usage. + */ + // const parent = 'abc123' + /** + * Any modifiers to the Table (e.g. snapshot timestamp). + */ + // const tableModifiers = {} + /** + * Initial number of streams. If unset or 0, we will + * provide a value of streams so as to produce reasonable throughput. Must be + * non-negative. The number of streams may be lower than the requested number, + * depending on the amount of parallelism that is reasonable for the table and + * the maximum amount of parallelism allowed by the system. + * Streams must be read starting from offset 0. + */ + // const requestedStreams = 1234 + /** + * Read options for this session (e.g. column selection, filters). + */ + // const readOptions = {} + /** + * Data output format. Currently defaults to Avro. + */ + // const format = {} + /** + * The strategy to use for distributing data among multiple streams. Currently + * defaults to liquid sharding.
+ */ + // const shardingStrategy = {} + + // Imports the Storage library + const {BigQueryStorageClient} = require('@google-cloud/bigquery-storage').v1beta1; + + // Instantiates a client + const storageClient = new BigQueryStorageClient(); + + async function callCreateReadSession() { + // Construct request + const request = { + tableReference, + parent, + }; + + // Run request + const response = await storageClient.createReadSession(request); + console.log(response); + } + + callCreateReadSession(); + // [END bigquerystorage_v1beta1_generated_BigQueryStorage_CreateReadSession_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.finalize_stream.js b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.finalize_stream.js new file mode 100644 index 00000000..51d63d0b --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.finalize_stream.js @@ -0,0 +1,58 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(stream) { + // [START bigquerystorage_v1beta1_generated_BigQueryStorage_FinalizeStream_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Stream to finalize. + */ + // const stream = {} + + // Imports the Storage library + const {BigQueryStorageClient} = require('@google-cloud/bigquery-storage').v1beta1; + + // Instantiates a client + const storageClient = new BigQueryStorageClient(); + + async function callFinalizeStream() { + // Construct request + const request = { + stream, + }; + + // Run request + const response = await storageClient.finalizeStream(request); + console.log(response); + } + + callFinalizeStream(); + // [END bigquerystorage_v1beta1_generated_BigQueryStorage_FinalizeStream_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.read_rows.js b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.read_rows.js new file mode 100644 index 00000000..65924b17 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.read_rows.js @@ -0,0 +1,62 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(readPosition) { + // [START bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Identifier of the position in the stream to start reading from. + * The offset requested must be less than the last row read from ReadRows. + * Requesting a larger offset is undefined. + */ + // const readPosition = {} + + // Imports the Storage library + const {BigQueryStorageClient} = require('@google-cloud/bigquery-storage').v1beta1; + + // Instantiates a client + const storageClient = new BigQueryStorageClient(); + + async function callReadRows() { + // Construct request + const request = { + readPosition, + }; + + // Run request + const stream = await storageClient.readRows(request); + stream.on('data', (response) => { console.log(response) }); + stream.on('error', (err) => { throw(err) }); + stream.on('end', () => { /* API call completed */ }); + } + + callReadRows(); + // [END bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.split_read_stream.js b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.split_read_stream.js new file mode 100644 index 00000000..24ae597d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.split_read_stream.js @@ -0,0 +1,68 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(originalStream) { + // [START bigquerystorage_v1beta1_generated_BigQueryStorage_SplitReadStream_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Stream to split. + */ + // const originalStream = {} + /** + * A value in the range (0.0, 1.0) that specifies the fractional point at + * which the original stream should be split. 
The actual split point is + * evaluated on pre-filtered rows, so if a filter is provided, then there is + * no guarantee that the division of the rows between the new child streams + * will be proportional to this fractional value. Additionally, because the + * server-side unit for assigning data is collections of rows, this fraction + * will always map to a data storage boundary on the server side. + */ + // const fraction = 1234 + + // Imports the Storage library + const {BigQueryStorageClient} = require('@google-cloud/bigquery-storage').v1beta1; + + // Instantiates a client + const storageClient = new BigQueryStorageClient(); + + async function callSplitReadStream() { + // Construct request + const request = { + originalStream, + }; + + // Run request + const response = await storageClient.splitReadStream(request); + console.log(response); + } + + callSplitReadStream(); + // [END bigquerystorage_v1beta1_generated_BigQueryStorage_SplitReadStream_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json b/owl-bot-staging/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json new file mode 100644 index 00000000..a36b0694 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json @@ -0,0 +1,247 @@ +{ + "clientLibrary": { + "name": "nodejs-storage", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.bigquery.storage.v1beta1", + "version": "v1beta1" + } + ] + }, + "snippets": [ + { + "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_CreateReadSession_async", + "title": "BigQueryStorage createReadSession Sample", + "origin": "API_DEFINITION", + "description": " Creates a new read session. A read session divides the contents of a BigQuery table into one or more streams, which can then be used to read data from the table. The read session also specifies properties of the data to be read, such as a list of columns or a push-down filter describing the rows to be returned. A particular row can be read by at most one stream. When the caller has reached the end of each stream in the session, then all the data in the table has been read.
Read sessions automatically expire 24 hours after they are created and do not require manual clean-up by the caller.", + "canonical": true, + "file": "big_query_storage.create_read_session.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 83, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CreateReadSession", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.CreateReadSession", + "async": true, + "parameters": [ + { + "name": "table_reference", + "type": ".google.cloud.bigquery.storage.v1beta1.TableReference" + }, + { + "name": "parent", + "type": "TYPE_STRING" + }, + { + "name": "table_modifiers", + "type": ".google.cloud.bigquery.storage.v1beta1.TableModifiers" + }, + { + "name": "requested_streams", + "type": "TYPE_INT32" + }, + { + "name": "read_options", + "type": ".google.cloud.bigquery.storage.v1beta1.TableReadOptions" + }, + { + "name": "format", + "type": ".google.cloud.bigquery.storage.v1beta1.DataFormat" + }, + { + "name": "sharding_strategy", + "type": ".google.cloud.bigquery.storage.v1beta1.ShardingStrategy" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1beta1.ReadSession", + "client": { + "shortName": "BigQueryStorageClient", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" + }, + "method": { + "shortName": "CreateReadSession", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.CreateReadSession", + "service": { + "shortName": "BigQueryStorage", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async", + "title": "BigQueryStorage readRows Sample", + "origin": "API_DEFINITION", + "description": " Reads rows from the table in the format prescribed by the read session. Each response contains one or more table rows, up to a maximum of 10 MiB per response; read requests which attempt to read individual rows larger than this will fail. Each request also returns a set of stream statistics reflecting the estimated total number of rows in the read stream. This number is computed based on the total table size and the number of active streams in the read session, and may change as other streams continue to read data.", + "canonical": true, + "file": "big_query_storage.read_rows.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 54, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ReadRows", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.ReadRows", + "async": true, + "parameters": [ + { + "name": "read_position", + "type": ".google.cloud.bigquery.storage.v1beta1.StreamPosition" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1beta1.ReadRowsResponse", + "client": { + "shortName": "BigQueryStorageClient", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" + }, + "method": { + "shortName": "ReadRows", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.ReadRows", + "service": { + "shortName": "BigQueryStorage", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_BatchCreateReadSessionStreams_async", + "title": "BigQueryStorage batchCreateReadSessionStreams Sample", + "origin": "API_DEFINITION", + "description": " Creates additional streams for a ReadSession. 
This API can be used to dynamically adjust the parallelism of a batch processing task upwards by adding additional workers.", + "canonical": true, + "file": "big_query_storage.batch_create_read_session_streams.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 58, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "BatchCreateReadSessionStreams", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.BatchCreateReadSessionStreams", + "async": true, + "parameters": [ + { + "name": "session", + "type": ".google.cloud.bigquery.storage.v1beta1.ReadSession" + }, + { + "name": "requested_streams", + "type": "TYPE_INT32" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse", + "client": { + "shortName": "BigQueryStorageClient", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" + }, + "method": { + "shortName": "BatchCreateReadSessionStreams", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.BatchCreateReadSessionStreams", + "service": { + "shortName": "BigQueryStorage", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_FinalizeStream_async", + "title": "BigQueryStorage finalizeStream Sample", + "origin": "API_DEFINITION", + "description": " Triggers the graceful termination of a single stream in a ReadSession. This API can be used to dynamically adjust the parallelism of a batch processing task downwards without losing data. This API does not delete the stream -- it remains visible in the ReadSession, and any data processed by the stream is not released to other streams. However, no additional data will be assigned to the stream once this call completes. Callers must continue reading data on the stream until the end of the stream is reached so that data which has already been assigned to the stream will be processed. This method will return an error if there are no other live streams in the Session, or if SplitReadStream() has been called on the given Stream.", + "canonical": true, + "file": "big_query_storage.finalize_stream.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 50, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "FinalizeStream", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.FinalizeStream", + "async": true, + "parameters": [ + { + "name": "stream", + "type": ".google.cloud.bigquery.storage.v1beta1.Stream" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "BigQueryStorageClient", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" + }, + "method": { + "shortName": "FinalizeStream", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.FinalizeStream", + "service": { + "shortName": "BigQueryStorage", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" + } + } + } + }, + { + "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_SplitReadStream_async", + "title": "BigQueryStorage splitReadStream Sample", + "origin": "API_DEFINITION", + "description": " Splits a given read stream into two Streams. These streams are referred to as the primary and the residual of the split. The original stream can still be read from in the same manner as before. 
Both of the returned streams can also be read from, and the total rows returned by both child streams will be the same as the rows read from the original stream. Moreover, the two child streams will be allocated back to back in the original Stream. Concretely, it is guaranteed that for streams Original, Primary, and Residual, that Original[0-j] = Primary[0-j] and Original[j-n] = Residual[0-m] once the streams have been read to completion. This method is guaranteed to be idempotent.", + "canonical": true, + "file": "big_query_storage.split_read_stream.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 60, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "SplitReadStream", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.SplitReadStream", + "async": true, + "parameters": [ + { + "name": "original_stream", + "type": ".google.cloud.bigquery.storage.v1beta1.Stream" + }, + { + "name": "fraction", + "type": "TYPE_FLOAT" + } + ], + "resultType": ".google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse", + "client": { + "shortName": "BigQueryStorageClient", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" + }, + "method": { + "shortName": "SplitReadStream", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.SplitReadStream", + "service": { + "shortName": "BigQueryStorage", + "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" + } + } + } + } + ] +} diff --git a/owl-bot-staging/v1beta1/src/index.ts b/owl-bot-staging/v1beta1/src/index.ts new file mode 100644 index 00000000..96c87b19 --- /dev/null +++ b/owl-bot-staging/v1beta1/src/index.ts @@ -0,0 +1,25 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v1beta1 from './v1beta1'; +const BigQueryStorageClient = v1beta1.BigQueryStorageClient; +type BigQueryStorageClient = v1beta1.BigQueryStorageClient; +export {v1beta1, BigQueryStorageClient}; +export default {v1beta1, BigQueryStorageClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client.ts b/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client.ts new file mode 100644 index 00000000..a1f8bb9c --- /dev/null +++ b/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client.ts @@ -0,0 +1,852 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import {Callback, CallOptions, Descriptors, ClientOptions, GoogleError} from 'google-gax'; + +import { PassThrough } from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1beta1/big_query_storage_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './big_query_storage_client_config.json'; + +const version = require('../../../package.json').version; + +/** + * BigQuery storage API. + * + * The BigQuery storage API can be used to read data stored in BigQuery. + * @class + * @memberof v1beta1 + */ +export class BigQueryStorageClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + pathTemplates: {[name: string]: gax.PathTemplate}; + bigQueryStorageStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of BigQueryStorageClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID.
If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean} [options.fallback] - Use HTTP fallback mode. + * In fallback mode, a special browser-compatible transport implementation is used + * instead of gRPC transport. In browser context (if the `window` object is defined) + * the fallback mode is enabled automatically; set `options.fallback` to `false` + * if you need to override this behavior. + */ + constructor(opts?: ClientOptions) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof BigQueryStorageClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gax.fallback : gax; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // This API contains "path templates"; forward-slash-separated + // identifiers to uniquely identify resources within the API. + // Create useful helper objects for these. 
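+ // For example (hypothetical values), streamPathTemplate.render({project: 'p', location: 'us', stream: 's'}) + // renders 'projects/p/locations/us/streams/s'; PathTemplate.match() inverts that mapping.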
+ this.pathTemplates = { + projectPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}' + ), + readSessionPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/sessions/{session}' + ), + streamPathTemplate: new this._gaxModule.PathTemplate( + 'projects/{project}/locations/{location}/streams/{stream}' + ), + }; + + // Some of the methods on this service provide streaming responses. + // Provide descriptors for these. + this.descriptors.stream = { + readRows: new this._gaxModule.StreamDescriptor(gax.StreamType.SERVER_STREAMING, opts.fallback === 'rest') + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.storage.v1beta1.BigQueryStorage', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = gax.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.bigQueryStorageStub) { + return this.bigQueryStorageStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.storage.v1beta1.BigQueryStorage. + this.bigQueryStorageStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.storage.v1beta1.BigQueryStorage') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.storage.v1beta1.BigQueryStorage, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
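+ // Note: methods registered in descriptors.stream (here, readRows) report a closed client + // by emitting an 'error' event on the returned stream, while the unary methods reject + // their returned promise with the same message.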
+ const bigQueryStorageStubMethods = + ['createReadSession', 'readRows', 'batchCreateReadSessionStreams', 'finalizeStream', 'splitReadStream']; + for (const methodName of bigQueryStorageStubMethods) { + const callPromise = this.bigQueryStorageStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + if (methodName in this.descriptors.stream) { + const stream = new PassThrough(); + setImmediate(() => { + stream.emit('error', new GoogleError('The client has already been closed.')); + }); + return stream; + } + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.stream[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.bigQueryStorageStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'bigquerystorage.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'bigquerystorage.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/bigquery', + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise<string>; + getProjectId(callback: Callback<string, undefined, undefined>): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to a string containing the project ID. + */ + getProjectId(callback?: Callback<string, undefined, undefined>): + Promise<string>|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- +/** + * Creates a new read session. A read session divides the contents of a + * BigQuery table into one or more streams, which can then be used to read + * data from the table. The read session also specifies properties of the + * data to be read, such as a list of columns or a push-down filter describing + * the rows to be returned. + * + * A particular row can be read by at most one stream. When the caller has + * reached the end of each stream in the session, then all the data in the + * table has been read. + * + * Read sessions automatically expire 24 hours after they are created and do + * not require manual clean-up by the caller. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.bigquery.storage.v1beta1.TableReference} request.tableReference + * Required. Reference to the table to read. + * @param {string} request.parent + * Required. String of the form `projects/{project_id}` indicating the + * project this ReadSession is associated with. This is the project that will + * be billed for usage. + * @param {google.cloud.bigquery.storage.v1beta1.TableModifiers} request.tableModifiers + * Any modifiers to the Table (e.g. snapshot timestamp).
+ * @param {number} request.requestedStreams + * Initial number of streams. If unset or 0, we will + * provide a number of streams so as to produce reasonable throughput. Must be + * non-negative. The number of streams may be lower than the requested number, + * depending on the amount of parallelism that is reasonable for the table and + * the maximum amount of parallelism allowed by the system. + * + * Streams must be read starting from offset 0. + * @param {google.cloud.bigquery.storage.v1beta1.TableReadOptions} request.readOptions + * Read options for this session (e.g. column selection, filters). + * @param {google.cloud.bigquery.storage.v1beta1.DataFormat} request.format + * Data output format. Currently defaults to Avro. + * @param {google.cloud.bigquery.storage.v1beta1.ShardingStrategy} request.shardingStrategy + * The strategy to use for distributing data among multiple streams. Currently + * defaults to liquid sharding. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [ReadSession]{@link google.cloud.bigquery.storage.v1beta1.ReadSession}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. + * @example include:samples/generated/v1beta1/big_query_storage.create_read_session.js + * region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_CreateReadSession_async + */ + createReadSession( + request?: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.storage.v1beta1.IReadSession, + protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|undefined, {}|undefined + ]>; + createReadSession( + request: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.storage.v1beta1.IReadSession, + protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|null|undefined, + {}|null|undefined>): void; + createReadSession( + request: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, + callback: Callback< + protos.google.cloud.bigquery.storage.v1beta1.IReadSession, + protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|null|undefined, + {}|null|undefined>): void; + createReadSession( + request?: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.storage.v1beta1.IReadSession, + protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.storage.v1beta1.IReadSession, + protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.storage.v1beta1.IReadSession, + protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options ||
{}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'table_reference.project_id': request.tableReference!.projectId || '', + 'table_reference.dataset_id': request.tableReference!.datasetId || '', + }); + this.initialize(); + return this.innerApiCalls.createReadSession(request, options, callback); + } +/** + * Creates additional streams for a ReadSession. This API can be used to + * dynamically adjust the parallelism of a batch processing task upwards by + * adding additional workers. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.bigquery.storage.v1beta1.ReadSession} request.session + * Required. Must be a non-expired session obtained from a call to + * CreateReadSession. Only the name field needs to be set. + * @param {number} request.requestedStreams + * Required. Number of new streams requested. Must be positive. + * Number of added streams may be less than this, see CreateReadSessionRequest + * for more information. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [BatchCreateReadSessionStreamsResponse]{@link google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. + * @example include:samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js + * region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_BatchCreateReadSessionStreams_async + */ + batchCreateReadSessionStreams( + request?: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|undefined, {}|undefined + ]>; + batchCreateReadSessionStreams( + request: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|null|undefined, + {}|null|undefined>): void; + batchCreateReadSessionStreams( + request: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, + callback: Callback< + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|null|undefined, + {}|null|undefined>): void; + batchCreateReadSessionStreams( + request?: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + 
protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, + protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'session.name': request.session!.name || '', + }); + this.initialize(); + return this.innerApiCalls.batchCreateReadSessionStreams(request, options, callback); + } +/** + * Triggers the graceful termination of a single stream in a ReadSession. This + * API can be used to dynamically adjust the parallelism of a batch processing + * task downwards without losing data. + * + * This API does not delete the stream -- it remains visible in the + * ReadSession, and any data processed by the stream is not released to other + * streams. However, no additional data will be assigned to the stream once + * this call completes. Callers must continue reading data on the stream until + * the end of the stream is reached so that data which has already been + * assigned to the stream will be processed. + * + * This method will return an error if there are no other live streams + * in the Session, or if SplitReadStream() has been called on the given + * Stream. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.bigquery.storage.v1beta1.Stream} request.stream + * Required. Stream to finalize. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. 
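+ * Note that the resolved value is a google.protobuf.Empty; the finalized stream must + * still be read to its end so that rows already assigned to it are processed.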
+ * @example include:samples/generated/v1beta1/big_query_storage.finalize_stream.js + * region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_FinalizeStream_async + */ + finalizeStream( + request?: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|undefined, {}|undefined + ]>; + finalizeStream( + request: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|null|undefined, + {}|null|undefined>): void; + finalizeStream( + request: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|null|undefined, + {}|null|undefined>): void; + finalizeStream( + request?: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'stream.name': request.stream!.name || '', + }); + this.initialize(); + return this.innerApiCalls.finalizeStream(request, options, callback); + } +/** + * Splits a given read stream into two Streams. These streams are referred to + * as the primary and the residual of the split. The original stream can still + * be read from in the same manner as before. Both of the returned streams can + * also be read from, and the total rows returned by both child streams will be + * the same as the rows read from the original stream. + * + * Moreover, the two child streams will be allocated back to back in the + * original Stream. Concretely, it is guaranteed that for streams Original, + * Primary, and Residual, that Original[0-j] = Primary[0-j] and + * Original[j-n] = Residual[0-m] once the streams have been read to + * completion. + * + * This method is guaranteed to be idempotent. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.bigquery.storage.v1beta1.Stream} request.originalStream + * Required. Stream to split. + * @param {number} request.fraction + * A value in the range (0.0, 1.0) that specifies the fractional point at + * which the original stream should be split. The actual split point is + * evaluated on pre-filtered rows, so if a filter is provided, then there is + * no guarantee that the division of the rows between the new child streams + * will be proportional to this fractional value.
Additionally, because the + * server-side unit for assigning data is collections of rows, this fraction + * will always map to a data storage boundary on the server side. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [SplitReadStreamResponse]{@link google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse}. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) + * for more details and examples. + * @example include:samples/generated/v1beta1/big_query_storage.split_read_stream.js + * region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_SplitReadStream_async + */ + splitReadStream( + request?: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|undefined, {}|undefined + ]>; + splitReadStream( + request: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|null|undefined, + {}|null|undefined>): void; + splitReadStream( + request: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, + callback: Callback< + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|null|undefined, + {}|null|undefined>): void; + splitReadStream( + request?: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, + protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'original_stream.name': request.originalStream!.name || '', + }); + this.initialize(); + return this.innerApiCalls.splitReadStream(request, options, callback); + } + +/** + * Reads rows from the table in the format prescribed by the read session. + * Each response contains one or more table rows, up to a maximum of 10 MiB + * per response; read requests which attempt to read individual rows larger + * than this will fail.
+ * + * Each request also returns a set of stream statistics reflecting the + * estimated total number of rows in the read stream. This number is computed + * based on the total table size and the number of active streams in the read + * session, and may change as other streams continue to read data. + * + * @param {Object} request + * The request object that will be sent. + * @param {google.cloud.bigquery.storage.v1beta1.StreamPosition} request.readPosition + * Required. Identifier of the position in the stream to start reading from. + * The offset requested must be less than the last row read from ReadRows. + * Requesting a larger offset is undefined. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits [ReadRowsResponse]{@link google.cloud.bigquery.storage.v1beta1.ReadRowsResponse} on 'data' event. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#server-streaming) + * for more details and examples. + * @example include:samples/generated/v1beta1/big_query_storage.read_rows.js + * region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async + */ + readRows( + request?: protos.google.cloud.bigquery.storage.v1beta1.IReadRowsRequest, + options?: CallOptions): + gax.CancellableStream{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = gax.routingHeader.fromParams({ + 'read_position.stream.name': request.readPosition!.stream!.name || '', + }); + this.initialize(); + return this.innerApiCalls.readRows(request, options); + } + + // -------------------- + // -- Path templates -- + // -------------------- + + /** + * Return a fully-qualified project resource name string. + * + * @param {string} project + * @returns {string} Resource name string. + */ + projectPath(project:string) { + return this.pathTemplates.projectPathTemplate.render({ + project: project, + }); + } + + /** + * Parse the project from Project resource. + * + * @param {string} projectName + * A fully-qualified path representing Project resource. + * @returns {string} A string representing the project. + */ + matchProjectFromProjectName(projectName: string) { + return this.pathTemplates.projectPathTemplate.match(projectName).project; + } + + /** + * Return a fully-qualified readSession resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} session + * @returns {string} Resource name string. + */ + readSessionPath(project:string,location:string,session:string) { + return this.pathTemplates.readSessionPathTemplate.render({ + project: project, + location: location, + session: session, + }); + } + + /** + * Parse the project from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the project. + */ + matchProjectFromReadSessionName(readSessionName: string) { + return this.pathTemplates.readSessionPathTemplate.match(readSessionName).project; + } + + /** + * Parse the location from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the location. 
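+ * For example, given the (hypothetical) name + * 'projects/my-project/locations/us-central1/sessions/session-1', this returns 'us-central1'.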
+ */ + matchLocationFromReadSessionName(readSessionName: string) { + return this.pathTemplates.readSessionPathTemplate.match(readSessionName).location; + } + + /** + * Parse the session from ReadSession resource. + * + * @param {string} readSessionName + * A fully-qualified path representing ReadSession resource. + * @returns {string} A string representing the session. + */ + matchSessionFromReadSessionName(readSessionName: string) { + return this.pathTemplates.readSessionPathTemplate.match(readSessionName).session; + } + + /** + * Return a fully-qualified stream resource name string. + * + * @param {string} project + * @param {string} location + * @param {string} stream + * @returns {string} Resource name string. + */ + streamPath(project:string,location:string,stream:string) { + return this.pathTemplates.streamPathTemplate.render({ + project: project, + location: location, + stream: stream, + }); + } + + /** + * Parse the project from Stream resource. + * + * @param {string} streamName + * A fully-qualified path representing Stream resource. + * @returns {string} A string representing the project. + */ + matchProjectFromStreamName(streamName: string) { + return this.pathTemplates.streamPathTemplate.match(streamName).project; + } + + /** + * Parse the location from Stream resource. + * + * @param {string} streamName + * A fully-qualified path representing Stream resource. + * @returns {string} A string representing the location. + */ + matchLocationFromStreamName(streamName: string) { + return this.pathTemplates.streamPathTemplate.match(streamName).location; + } + + /** + * Parse the stream from Stream resource. + * + * @param {string} streamName + * A fully-qualified path representing Stream resource. + * @returns {string} A string representing the stream. + */ + matchStreamFromStreamName(streamName: string) { + return this.pathTemplates.streamPathTemplate.match(streamName).stream; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
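+ * If the client was never initialized, there is no channel to terminate and the + * returned promise resolves immediately.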
+ */ + close(): Promise<void> { + if (this.bigQueryStorageStub && !this._terminated) { + return this.bigQueryStorageStub.then(stub => { + this._terminated = true; + stub.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client_config.json b/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client_config.json new file mode 100644 index 00000000..003cb084 --- /dev/null +++ b/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client_config.json @@ -0,0 +1,54 @@ +{ + "interfaces": { + "google.cloud.bigquery.storage.v1beta1.BigQueryStorage": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ], + "unavailable": [ + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "CreateReadSession": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "ReadRows": { + "timeout_millis": 86400000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, + "BatchCreateReadSessionStreams": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "FinalizeStream": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "SplitReadStream": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_proto_list.json b/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_proto_list.json new file mode 100644 index 00000000..0b801075 --- /dev/null +++ b/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_proto_list.json @@ -0,0 +1,7 @@ +[ + "../../protos/google/cloud/bigquery/storage/v1beta1/arrow.proto", + "../../protos/google/cloud/bigquery/storage/v1beta1/avro.proto", + "../../protos/google/cloud/bigquery/storage/v1beta1/read_options.proto", + "../../protos/google/cloud/bigquery/storage/v1beta1/storage.proto", + "../../protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto" +] diff --git a/owl-bot-staging/v1beta1/src/v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/src/v1beta1/gapic_metadata.json new file mode 100644 index 00000000..00d888bf --- /dev/null +++ b/owl-bot-staging/v1beta1/src/v1beta1/gapic_metadata.json @@ -0,0 +1,68 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "typescript", + "protoPackage": "google.cloud.bigquery.storage.v1beta1", + "libraryPackage": "@google-cloud/bigquery-storage", + "services": { + "BigQueryStorage": { + "clients": { + "grpc": { + "libraryClient": "BigQueryStorageClient", + "rpcs": { + "CreateReadSession": { + "methods": [ + "createReadSession" + ] + }, + "BatchCreateReadSessionStreams": { + "methods": [ + "batchCreateReadSessionStreams" + ] + }, + "FinalizeStream": { + "methods": [ + "finalizeStream" + ] + }, + "SplitReadStream": { + "methods": [ + "splitReadStream" + ] + }, + "ReadRows": { + "methods": [ + "readRows" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "BigQueryStorageClient", + "rpcs": { + "CreateReadSession": { + "methods": [ + "createReadSession" + ] },
"BatchCreateReadSessionStreams": { + "methods": [ + "batchCreateReadSessionStreams" + ] + }, + "FinalizeStream": { + "methods": [ + "finalizeStream" + ] + }, + "SplitReadStream": { + "methods": [ + "splitReadStream" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1beta1/src/v1beta1/index.ts b/owl-bot-staging/v1beta1/src/v1beta1/index.ts new file mode 100644 index 00000000..dc3afed8 --- /dev/null +++ b/owl-bot-staging/v1beta1/src/v1beta1/index.ts @@ -0,0 +1,19 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +export {BigQueryStorageClient} from './big_query_storage_client'; diff --git a/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.js new file mode 100644 index 00000000..cc56b095 --- /dev/null +++ b/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.js @@ -0,0 +1,27 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const storage = require('@google-cloud/bigquery-storage'); + +function main() { + const bigQueryStorageClient = new storage.BigQueryStorageClient(); +} + +main(); diff --git a/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.ts new file mode 100644 index 00000000..6e4fec44 --- /dev/null +++ b/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.ts @@ -0,0 +1,32 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {BigQueryStorageClient} from '@google-cloud/bigquery-storage'; + +// check that the client class type name can be used +function doStuffWithBigQueryStorageClient(client: BigQueryStorageClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const bigQueryStorageClient = new BigQueryStorageClient(); + doStuffWithBigQueryStorageClient(bigQueryStorageClient); +} + +main(); diff --git a/owl-bot-staging/v1beta1/system-test/install.ts b/owl-bot-staging/v1beta1/system-test/install.ts new file mode 100644 index 00000000..8ec45222 --- /dev/null +++ b/owl-bot-staging/v1beta1/system-test/install.ts @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import { packNTest } from 'pack-n-play'; +import { readFileSync } from 'fs'; +import { describe, it } from 'mocha'; + +describe('📦 pack-n-play test', () => { + + it('TypeScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() + } + }; + await packNTest(options); + }); + + it('JavaScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'JavaScript user can use the library', + ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() + } + }; + await packNTest(options); + }); + +}); diff --git a/owl-bot-staging/v1beta1/test/gapic_big_query_storage_v1beta1.ts b/owl-bot-staging/v1beta1/test/gapic_big_query_storage_v1beta1.ts new file mode 100644 index 00000000..62d96796 --- /dev/null +++ b/owl-bot-staging/v1beta1/test/gapic_big_query_storage_v1beta1.ts @@ -0,0 +1,781 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
 **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as protos from '../protos/protos';
+import * as assert from 'assert';
+import * as sinon from 'sinon';
+import {SinonStub} from 'sinon';
+import { describe, it } from 'mocha';
+import * as bigquerystorageModule from '../src';
+
+import {PassThrough} from 'stream';
+
+import {protobuf} from 'google-gax';
+
+function generateSampleMessage<T extends object>(instance: T) {
+    const filledObject = (instance.constructor as typeof protobuf.Message)
+        .toObject(instance as protobuf.Message, {defaults: true});
+    return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T;
+}
+
+function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) {
+    return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]);
+}
+
+function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) {
+    return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response);
+}
+
+function stubServerStreamingCall<ResponseType>(response?: ResponseType, error?: Error) {
+    const transformStub = error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response);
+    const mockStream = new PassThrough({
+        objectMode: true,
+        transform: transformStub,
+    });
+    // write something to the stream to trigger transformStub and send the response back to the client
+    setImmediate(() => { mockStream.write({}); });
+    setImmediate(() => { mockStream.end(); });
+    return sinon.stub().returns(mockStream);
+}
+
+describe('v1beta1.BigQueryStorageClient', () => {
+    it('has servicePath', () => {
+        const servicePath = bigquerystorageModule.v1beta1.BigQueryStorageClient.servicePath;
+        assert(servicePath);
+    });
+
+    it('has apiEndpoint', () => {
+        const apiEndpoint = bigquerystorageModule.v1beta1.BigQueryStorageClient.apiEndpoint;
+        assert(apiEndpoint);
+    });
+
+    it('has port', () => {
+        const port = bigquerystorageModule.v1beta1.BigQueryStorageClient.port;
+        assert(port);
+        assert(typeof port === 'number');
+    });
+
+    it('should create a client with no option', () => {
+        const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient();
+        assert(client);
+    });
+
+    it('should create a client with gRPC fallback', () => {
+        const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+            fallback: true,
+        });
+        assert(client);
+    });
+
+    it('has initialize method and supports deferred initialization', async () => {
+        const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+            credentials: {client_email: 'bogus', private_key: 'bogus'},
+            projectId: 'bogus',
+        });
+        assert.strictEqual(client.bigQueryStorageStub, undefined);
+        await client.initialize();
+        assert(client.bigQueryStorageStub);
+    });
+
+    it('has close method for the initialized client', done => {
+        const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+            credentials: {client_email: 'bogus', private_key: 'bogus'},
+            projectId: 'bogus',
+        });
+        client.initialize();
+        assert(client.bigQueryStorageStub);
+        client.close().then(() => {
+            done();
+        });
+    });
+
+    it('has close method for the non-initialized client', done => {
+        const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({
+            credentials: {client_email: 'bogus', private_key: 'bogus'},
+            projectId: 'bogus',
+        });
+        assert.strictEqual(client.bigQueryStorageStub, undefined);
+        client.close().then(() => {
+            done();
+        });
+    });
+
+    it('has getProjectId method', async () => {
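+        // The auth dependency is faked below: client.auth.getProjectId is
+        // replaced with a sinon stub, so the test stays hermetic (no metadata
+        // server or real credentials) and can assert that the client delegates
+        // project-ID resolution to the auth layer.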
const fakeProjectId = 'fake-project-id'; + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + + describe('createReadSession', () => { + it('invokes createReadSession without error', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest()); + request.tableReference = {}; + request.tableReference.projectId = ''; + request.tableReference = {}; + request.tableReference.datasetId = ''; + const expectedHeaderRequestParams = "table_reference.project_id=&table_reference.dataset_id="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadSession()); + client.innerApiCalls.createReadSession = stubSimpleCall(expectedResponse); + const [response] = await client.createReadSession(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.createReadSession as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes createReadSession without error using callback', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest()); + request.tableReference = {}; + request.tableReference.projectId = ''; + request.tableReference = {}; + request.tableReference.datasetId = ''; + const expectedHeaderRequestParams = "table_reference.project_id=&table_reference.dataset_id="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadSession()); + client.innerApiCalls.createReadSession = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.createReadSession( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1beta1.IReadSession|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + 
assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.createReadSession as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes createReadSession with error', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest()); + request.tableReference = {}; + request.tableReference.projectId = ''; + request.tableReference = {}; + request.tableReference.datasetId = ''; + const expectedHeaderRequestParams = "table_reference.project_id=&table_reference.dataset_id="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.createReadSession = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.createReadSession(request), expectedError); + assert((client.innerApiCalls.createReadSession as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes createReadSession with closed client', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest()); + request.tableReference = {}; + request.tableReference.projectId = ''; + request.tableReference = {}; + request.tableReference.datasetId = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.createReadSession(request), expectedError); + }); + }); + + describe('batchCreateReadSessionStreams', () => { + it('invokes batchCreateReadSessionStreams without error', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest()); + request.session = {}; + request.session.name = ''; + const expectedHeaderRequestParams = "session.name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse()); + client.innerApiCalls.batchCreateReadSessionStreams = stubSimpleCall(expectedResponse); + const [response] = await client.batchCreateReadSessionStreams(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.batchCreateReadSessionStreams as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes batchCreateReadSessionStreams without error using callback', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new 
protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest()); + request.session = {}; + request.session.name = ''; + const expectedHeaderRequestParams = "session.name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse()); + client.innerApiCalls.batchCreateReadSessionStreams = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.batchCreateReadSessionStreams( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.batchCreateReadSessionStreams as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes batchCreateReadSessionStreams with error', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest()); + request.session = {}; + request.session.name = ''; + const expectedHeaderRequestParams = "session.name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.batchCreateReadSessionStreams = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.batchCreateReadSessionStreams(request), expectedError); + assert((client.innerApiCalls.batchCreateReadSessionStreams as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes batchCreateReadSessionStreams with closed client', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest()); + request.session = {}; + request.session.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.batchCreateReadSessionStreams(request), expectedError); + }); + }); + + describe('finalizeStream', () => { + it('invokes finalizeStream without error', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest()); + request.stream = {}; + request.stream.name = ''; + const expectedHeaderRequestParams = "stream.name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.protobuf.Empty()); + client.innerApiCalls.finalizeStream = stubSimpleCall(expectedResponse); + 
const [response] = await client.finalizeStream(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.finalizeStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes finalizeStream without error using callback', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest()); + request.stream = {}; + request.stream.name = ''; + const expectedHeaderRequestParams = "stream.name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.protobuf.Empty()); + client.innerApiCalls.finalizeStream = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.finalizeStream( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.finalizeStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes finalizeStream with error', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest()); + request.stream = {}; + request.stream.name = ''; + const expectedHeaderRequestParams = "stream.name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.finalizeStream = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.finalizeStream(request), expectedError); + assert((client.innerApiCalls.finalizeStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes finalizeStream with closed client', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest()); + request.stream = {}; + request.stream.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.finalizeStream(request), expectedError); + }); + }); + + describe('splitReadStream', () => { + it('invokes splitReadStream without error', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest()); + request.originalStream = {}; + request.originalStream.name = ''; + const expectedHeaderRequestParams = "original_stream.name="; + const 
expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse()); + client.innerApiCalls.splitReadStream = stubSimpleCall(expectedResponse); + const [response] = await client.splitReadStream(request); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.splitReadStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes splitReadStream without error using callback', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest()); + request.originalStream = {}; + request.originalStream.name = ''; + const expectedHeaderRequestParams = "original_stream.name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse()); + client.innerApiCalls.splitReadStream = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.splitReadStream( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.splitReadStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes splitReadStream with error', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest()); + request.originalStream = {}; + request.originalStream.name = ''; + const expectedHeaderRequestParams = "original_stream.name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.splitReadStream = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.splitReadStream(request), expectedError); + assert((client.innerApiCalls.splitReadStream as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes splitReadStream with closed client', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest()); + request.originalStream = {}; + request.originalStream.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.splitReadStream(request), expectedError); + }); + }); + + describe('readRows', () => { + it('invokes readRows 
without error', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadRowsRequest()); + request.readPosition = {}; + request.readPosition.stream = {}; + request.readPosition.stream.name = ''; + const expectedHeaderRequestParams = "read_position.stream.name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadRowsResponse()); + client.innerApiCalls.readRows = stubServerStreamingCall(expectedResponse); + const stream = client.readRows(request); + const promise = new Promise((resolve, reject) => { + stream.on('data', (response: protos.google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) => { + resolve(response); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.readRows as SinonStub) + .getCall(0).calledWith(request, expectedOptions)); + }); + + it('invokes readRows with error', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadRowsRequest()); + request.readPosition = {}; + request.readPosition.stream = {}; + request.readPosition.stream.name = ''; + const expectedHeaderRequestParams = "read_position.stream.name="; + const expectedOptions = { + otherArgs: { + headers: { + 'x-goog-request-params': expectedHeaderRequestParams, + }, + }, + }; + const expectedError = new Error('expected'); + client.innerApiCalls.readRows = stubServerStreamingCall(undefined, expectedError); + const stream = client.readRows(request); + const promise = new Promise((resolve, reject) => { + stream.on('data', (response: protos.google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) => { + resolve(response); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.innerApiCalls.readRows as SinonStub) + .getCall(0).calledWith(request, expectedOptions)); + }); + + it('invokes readRows with closed client', async () => { + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadRowsRequest()); + request.readPosition = {}; + request.readPosition.stream = {}; + request.readPosition.stream.name = ''; + const expectedError = new Error('The client has already been closed.'); + client.close(); + const stream = client.readRows(request); + const promise = new Promise((resolve, reject) => { + stream.on('data', (response: protos.google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) => { + resolve(response); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + }); + }); + + describe('Path templates', () => { + + describe('project', () => { + const fakePath = 
"/rendered/path/project"; + const expectedParameters = { + project: "projectValue", + }; + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.projectPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.projectPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('projectPath', () => { + const result = client.projectPath("projectValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.projectPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromProjectName', () => { + const result = client.matchProjectFromProjectName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.projectPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('readSession', () => { + const fakePath = "/rendered/path/readSession"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + session: "sessionValue", + }; + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.readSessionPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.readSessionPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('readSessionPath', () => { + const result = client.readSessionPath("projectValue", "locationValue", "sessionValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.readSessionPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + it('matchProjectFromReadSessionName', () => { + const result = client.matchProjectFromReadSessionName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromReadSessionName', () => { + const result = client.matchLocationFromReadSessionName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchSessionFromReadSessionName', () => { + const result = client.matchSessionFromReadSessionName(fakePath); + assert.strictEqual(result, "sessionValue"); + assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + + describe('stream', () => { + const fakePath = "/rendered/path/stream"; + const expectedParameters = { + project: "projectValue", + location: "locationValue", + stream: "streamValue", + }; + const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + client.pathTemplates.streamPathTemplate.render = + sinon.stub().returns(fakePath); + client.pathTemplates.streamPathTemplate.match = + sinon.stub().returns(expectedParameters); + + it('streamPath', () => { + const result = client.streamPath("projectValue", "locationValue", "streamValue"); + assert.strictEqual(result, fakePath); + assert((client.pathTemplates.streamPathTemplate.render as SinonStub) + .getCall(-1).calledWith(expectedParameters)); + }); + + 
it('matchProjectFromStreamName', () => { + const result = client.matchProjectFromStreamName(fakePath); + assert.strictEqual(result, "projectValue"); + assert((client.pathTemplates.streamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchLocationFromStreamName', () => { + const result = client.matchLocationFromStreamName(fakePath); + assert.strictEqual(result, "locationValue"); + assert((client.pathTemplates.streamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + + it('matchStreamFromStreamName', () => { + const result = client.matchStreamFromStreamName(fakePath); + assert.strictEqual(result, "streamValue"); + assert((client.pathTemplates.streamPathTemplate.match as SinonStub) + .getCall(-1).calledWith(fakePath)); + }); + }); + }); +}); diff --git a/owl-bot-staging/v1beta1/tsconfig.json b/owl-bot-staging/v1beta1/tsconfig.json new file mode 100644 index 00000000..c78f1c88 --- /dev/null +++ b/owl-bot-staging/v1beta1/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/owl-bot-staging/v1beta1/webpack.config.js b/owl-bot-staging/v1beta1/webpack.config.js new file mode 100644 index 00000000..5f047fa3 --- /dev/null +++ b/owl-bot-staging/v1beta1/webpack.config.js @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'BigQueryStorage', + filename: './big-query-storage.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; From f8e9d4ff6b58e5d98d54c275daeda0eb7f21e9e8 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 24 May 2022 10:03:15 +0000 Subject: [PATCH 2/2] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot=20po?= =?UTF-8?q?st-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- owl-bot-staging/v1/.eslintignore | 7 - owl-bot-staging/v1/.eslintrc.json | 3 - owl-bot-staging/v1/.gitignore | 14 - owl-bot-staging/v1/.jsdoc.js | 55 - owl-bot-staging/v1/.mocharc.js | 33 - owl-bot-staging/v1/.prettierrc.js | 22 - owl-bot-staging/v1/README.md | 1 - owl-bot-staging/v1/linkinator.config.json | 16 - owl-bot-staging/v1/package.json | 65 -- .../cloud/bigquery/storage/v1/arrow.proto | 64 - .../cloud/bigquery/storage/v1/avro.proto | 41 - .../cloud/bigquery/storage/v1/protobuf.proto | 48 - .../cloud/bigquery/storage/v1/storage.proto | 624 ---------- .../cloud/bigquery/storage/v1/stream.proto | 217 ---- .../cloud/bigquery/storage/v1/table.proto | 164 --- .../v1/big_query_read.create_read_session.js | 74 -- .../generated/v1/big_query_read.read_rows.js | 66 -- .../v1/big_query_read.split_read_stream.js | 68 -- .../v1/big_query_write.append_rows.js | 85 -- ..._query_write.batch_commit_write_streams.js | 64 - .../v1/big_query_write.create_write_stream.js | 64 - .../big_query_write.finalize_write_stream.js | 59 - .../v1/big_query_write.flush_rows.js | 63 - .../v1/big_query_write.get_write_stream.js | 59 - ...data.google.cloud.bigquery.storage.v1.json | 415 ------- owl-bot-staging/v1/src/index.ts | 27 - .../v1/src/v1/big_query_read_client.ts | 816 ------------- .../src/v1/big_query_read_client_config.json | 44 - .../v1/src/v1/big_query_read_proto_list.json | 8 - .../v1/src/v1/big_query_write_client.ts | 1028 ----------------- .../src/v1/big_query_write_client_config.json | 59 - .../v1/src/v1/big_query_write_proto_list.json | 8 - owl-bot-staging/v1/src/v1/gapic_metadata.json | 117 -- owl-bot-staging/v1/src/v1/index.ts | 20 - .../system-test/fixtures/sample/src/index.js | 28 - .../system-test/fixtures/sample/src/index.ts | 38 - owl-bot-staging/v1/system-test/install.ts | 49 - .../v1/test/gapic_big_query_read_v1.ts | 669 ----------- .../v1/test/gapic_big_query_write_v1.ts | 921 --------------- owl-bot-staging/v1/tsconfig.json | 19 - owl-bot-staging/v1/webpack.config.js | 64 - owl-bot-staging/v1beta1/.eslintignore | 7 - owl-bot-staging/v1beta1/.eslintrc.json | 3 - owl-bot-staging/v1beta1/.gitignore | 14 - owl-bot-staging/v1beta1/.jsdoc.js | 55 - owl-bot-staging/v1beta1/.mocharc.js | 33 - owl-bot-staging/v1beta1/.prettierrc.js | 22 - 
owl-bot-staging/v1beta1/README.md | 1 - .../v1beta1/linkinator.config.json | 16 - owl-bot-staging/v1beta1/package.json | 64 - .../bigquery/storage/v1beta1/arrow.proto | 36 - .../cloud/bigquery/storage/v1beta1/avro.proto | 37 - .../storage/v1beta1/read_options.proto | 39 - .../bigquery/storage/v1beta1/storage.proto | 405 ------- .../storage/v1beta1/table_reference.proto | 42 - ...orage.batch_create_read_session_streams.js | 66 -- .../big_query_storage.create_read_session.js | 91 -- .../big_query_storage.finalize_stream.js | 58 - .../v1beta1/big_query_storage.read_rows.js | 62 - .../big_query_storage.split_read_stream.js | 68 -- ...google.cloud.bigquery.storage.v1beta1.json | 247 ---- owl-bot-staging/v1beta1/src/index.ts | 25 - .../src/v1beta1/big_query_storage_client.ts | 852 -------------- .../big_query_storage_client_config.json | 54 - .../v1beta1/big_query_storage_proto_list.json | 7 - .../v1beta1/src/v1beta1/gapic_metadata.json | 68 -- owl-bot-staging/v1beta1/src/v1beta1/index.ts | 19 - .../system-test/fixtures/sample/src/index.js | 27 - .../system-test/fixtures/sample/src/index.ts | 32 - .../v1beta1/system-test/install.ts | 49 - .../test/gapic_big_query_storage_v1beta1.ts | 781 ------------- owl-bot-staging/v1beta1/tsconfig.json | 19 - owl-bot-staging/v1beta1/webpack.config.js | 64 - src/v1/big_query_read_client.ts | 3 +- src/v1/big_query_write_client.ts | 3 +- src/v1beta1/big_query_storage_client.ts | 3 +- 76 files changed, 6 insertions(+), 9672 deletions(-) delete mode 100644 owl-bot-staging/v1/.eslintignore delete mode 100644 owl-bot-staging/v1/.eslintrc.json delete mode 100644 owl-bot-staging/v1/.gitignore delete mode 100644 owl-bot-staging/v1/.jsdoc.js delete mode 100644 owl-bot-staging/v1/.mocharc.js delete mode 100644 owl-bot-staging/v1/.prettierrc.js delete mode 100644 owl-bot-staging/v1/README.md delete mode 100644 owl-bot-staging/v1/linkinator.config.json delete mode 100644 owl-bot-staging/v1/package.json delete mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/arrow.proto delete mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/avro.proto delete mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/protobuf.proto delete mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/storage.proto delete mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/stream.proto delete mode 100644 owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/table.proto delete mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_read.create_read_session.js delete mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_read.read_rows.js delete mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_read.split_read_stream.js delete mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.append_rows.js delete mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.batch_commit_write_streams.js delete mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.create_write_stream.js delete mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.finalize_write_stream.js delete mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.flush_rows.js delete mode 100644 owl-bot-staging/v1/samples/generated/v1/big_query_write.get_write_stream.js delete mode 100644 owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json delete mode 100644 owl-bot-staging/v1/src/index.ts delete mode 100644 
owl-bot-staging/v1/src/v1/big_query_read_client.ts delete mode 100644 owl-bot-staging/v1/src/v1/big_query_read_client_config.json delete mode 100644 owl-bot-staging/v1/src/v1/big_query_read_proto_list.json delete mode 100644 owl-bot-staging/v1/src/v1/big_query_write_client.ts delete mode 100644 owl-bot-staging/v1/src/v1/big_query_write_client_config.json delete mode 100644 owl-bot-staging/v1/src/v1/big_query_write_proto_list.json delete mode 100644 owl-bot-staging/v1/src/v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/src/v1/index.ts delete mode 100644 owl-bot-staging/v1/system-test/fixtures/sample/src/index.js delete mode 100644 owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts delete mode 100644 owl-bot-staging/v1/system-test/install.ts delete mode 100644 owl-bot-staging/v1/test/gapic_big_query_read_v1.ts delete mode 100644 owl-bot-staging/v1/test/gapic_big_query_write_v1.ts delete mode 100644 owl-bot-staging/v1/tsconfig.json delete mode 100644 owl-bot-staging/v1/webpack.config.js delete mode 100644 owl-bot-staging/v1beta1/.eslintignore delete mode 100644 owl-bot-staging/v1beta1/.eslintrc.json delete mode 100644 owl-bot-staging/v1beta1/.gitignore delete mode 100644 owl-bot-staging/v1beta1/.jsdoc.js delete mode 100644 owl-bot-staging/v1beta1/.mocharc.js delete mode 100644 owl-bot-staging/v1beta1/.prettierrc.js delete mode 100644 owl-bot-staging/v1beta1/README.md delete mode 100644 owl-bot-staging/v1beta1/linkinator.config.json delete mode 100644 owl-bot-staging/v1beta1/package.json delete mode 100644 owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto delete mode 100644 owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/avro.proto delete mode 100644 owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto delete mode 100644 owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/storage.proto delete mode 100644 owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto delete mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js delete mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.create_read_session.js delete mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.finalize_stream.js delete mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.read_rows.js delete mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.split_read_stream.js delete mode 100644 owl-bot-staging/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json delete mode 100644 owl-bot-staging/v1beta1/src/index.ts delete mode 100644 owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client.ts delete mode 100644 owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client_config.json delete mode 100644 owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_proto_list.json delete mode 100644 owl-bot-staging/v1beta1/src/v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/src/v1beta1/index.ts delete mode 100644 owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.js delete mode 100644 owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.ts delete mode 100644 owl-bot-staging/v1beta1/system-test/install.ts delete mode 100644 owl-bot-staging/v1beta1/test/gapic_big_query_storage_v1beta1.ts delete mode 100644 
owl-bot-staging/v1beta1/tsconfig.json delete mode 100644 owl-bot-staging/v1beta1/webpack.config.js diff --git a/owl-bot-staging/v1/.eslintignore b/owl-bot-staging/v1/.eslintignore deleted file mode 100644 index cfc348ec..00000000 --- a/owl-bot-staging/v1/.eslintignore +++ /dev/null @@ -1,7 +0,0 @@ -**/node_modules -**/.coverage -build/ -docs/ -protos/ -system-test/ -samples/generated/ diff --git a/owl-bot-staging/v1/.eslintrc.json b/owl-bot-staging/v1/.eslintrc.json deleted file mode 100644 index 78215349..00000000 --- a/owl-bot-staging/v1/.eslintrc.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "./node_modules/gts" -} diff --git a/owl-bot-staging/v1/.gitignore b/owl-bot-staging/v1/.gitignore deleted file mode 100644 index 5d32b237..00000000 --- a/owl-bot-staging/v1/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -**/*.log -**/node_modules -.coverage -coverage -.nyc_output -docs/ -out/ -build/ -system-test/secrets.js -system-test/*key.json -*.lock -.DS_Store -package-lock.json -__pycache__ diff --git a/owl-bot-staging/v1/.jsdoc.js b/owl-bot-staging/v1/.jsdoc.js deleted file mode 100644 index 21870f2a..00000000 --- a/owl-bot-staging/v1/.jsdoc.js +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -'use strict'; - -module.exports = { - opts: { - readme: './README.md', - package: './package.json', - template: './node_modules/jsdoc-fresh', - recurse: true, - verbose: true, - destination: './docs/' - }, - plugins: [ - 'plugins/markdown', - 'jsdoc-region-tag' - ], - source: { - excludePattern: '(^|\\/|\\\\)[._]', - include: [ - 'build/src', - 'protos' - ], - includePattern: '\\.js$' - }, - templates: { - copyright: 'Copyright 2022 Google LLC', - includeDate: false, - sourceFiles: false, - systemName: '@google-cloud/bigquery-storage', - theme: 'lumen', - default: { - outputSourceFiles: false - } - }, - markdown: { - idInHeadings: true - } -}; diff --git a/owl-bot-staging/v1/.mocharc.js b/owl-bot-staging/v1/.mocharc.js deleted file mode 100644 index 481c522b..00000000 --- a/owl-bot-staging/v1/.mocharc.js +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -const config = { - "enable-source-maps": true, - "throw-deprecation": true, - "timeout": 10000 -} -if (process.env.MOCHA_THROW_DEPRECATION === 'false') { - delete config['throw-deprecation']; -} -if (process.env.MOCHA_REPORTER) { - config.reporter = process.env.MOCHA_REPORTER; -} -if (process.env.MOCHA_REPORTER_OUTPUT) { - config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; -} -module.exports = config diff --git a/owl-bot-staging/v1/.prettierrc.js b/owl-bot-staging/v1/.prettierrc.js deleted file mode 100644 index 494e1478..00000000 --- a/owl-bot-staging/v1/.prettierrc.js +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -module.exports = { - ...require('gts/.prettierrc.json') -} diff --git a/owl-bot-staging/v1/README.md b/owl-bot-staging/v1/README.md deleted file mode 100644 index f5dcfbaf..00000000 --- a/owl-bot-staging/v1/README.md +++ /dev/null @@ -1 +0,0 @@ -Storage: Nodejs Client diff --git a/owl-bot-staging/v1/linkinator.config.json b/owl-bot-staging/v1/linkinator.config.json deleted file mode 100644 index befd23c8..00000000 --- a/owl-bot-staging/v1/linkinator.config.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "recurse": true, - "skip": [ - "https://codecov.io/gh/googleapis/", - "www.googleapis.com", - "img.shields.io", - "https://console.cloud.google.com/cloudshell", - "https://support.google.com" - ], - "silent": true, - "concurrency": 5, - "retry": true, - "retryErrors": true, - "retryErrorsCount": 5, - "retryErrorsJitter": 3000 -} diff --git a/owl-bot-staging/v1/package.json b/owl-bot-staging/v1/package.json deleted file mode 100644 index 240c0964..00000000 --- a/owl-bot-staging/v1/package.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "@google-cloud/bigquery-storage", - "version": "0.1.0", - "description": "Storage client for Node.js", - "repository": "googleapis/nodejs-storage", - "license": "Apache-2.0", - "author": "Google LLC", - "main": "build/src/index.js", - "files": [ - "build/src", - "build/protos" - ], - "keywords": [ - "google apis client", - "google api client", - "google apis", - "google api", - "google", - "google cloud platform", - "google cloud", - "cloud", - "google storage", - "storage", - "big query read", - "big query write" - ], - "scripts": { - "clean": "gts clean", - "compile": "tsc -p . 
&& cp -r protos build/", - "compile-protos": "compileProtos src", - "docs": "jsdoc -c .jsdoc.js", - "predocs-test": "npm run docs", - "docs-test": "linkinator docs", - "fix": "gts fix", - "lint": "gts check", - "prepare": "npm run compile-protos && npm run compile", - "system-test": "c8 mocha build/system-test", - "test": "c8 mocha build/test" - }, - "dependencies": { - "google-gax": "^2.29.4" - }, - "devDependencies": { - "@types/mocha": "^9.1.0", - "@types/node": "^16.0.0", - "@types/sinon": "^10.0.8", - "c8": "^7.11.0", - "gts": "^3.1.0", - "jsdoc": "^3.6.7", - "jsdoc-fresh": "^1.1.1", - "jsdoc-region-tag": "^1.3.1", - "linkinator": "^3.0.0", - "mocha": "^9.1.4", - "null-loader": "^4.0.1", - "pack-n-play": "^1.0.0-2", - "sinon": "^13.0.0", - "ts-loader": "^9.2.6", - "typescript": "^4.5.5", - "webpack": "^5.67.0", - "webpack-cli": "^4.9.1" - }, - "engines": { - "node": ">=v10.24.0" - } -} diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/arrow.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/arrow.proto deleted file mode 100644 index 6d3f6080..00000000 --- a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/arrow.proto +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1; - -option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; -option java_multiple_files = true; -option java_outer_classname = "ArrowProto"; -option java_package = "com.google.cloud.bigquery.storage.v1"; -option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; - -// Arrow schema as specified in -// https://arrow.apache.org/docs/python/api/datatypes.html -// and serialized to bytes using IPC: -// https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc -// -// See code samples on how this message can be deserialized. -message ArrowSchema { - // IPC serialized Arrow schema. - bytes serialized_schema = 1; -} - -// Arrow RecordBatch. -message ArrowRecordBatch { - // IPC-serialized Arrow RecordBatch. - bytes serialized_record_batch = 1; - - // [Deprecated] The count of rows in `serialized_record_batch`. - // Please use the format-independent ReadRowsResponse.row_count instead. - int64 row_count = 2 [deprecated = true]; -} - -// Contains options specific to Arrow Serialization. -message ArrowSerializationOptions { - // Compression codec's supported by Arrow. - enum CompressionCodec { - // If unspecified no compression will be used. - COMPRESSION_UNSPECIFIED = 0; - - // LZ4 Frame (https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md) - LZ4_FRAME = 1; - - // Zstandard compression. - ZSTD = 2; - } - - // The compression codec to use for Arrow buffers in serialized record - // batches. 
- CompressionCodec buffer_compression = 2; -} diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/avro.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/avro.proto deleted file mode 100644 index 15de2db5..00000000 --- a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/avro.proto +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1; - -option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; -option java_multiple_files = true; -option java_outer_classname = "AvroProto"; -option java_package = "com.google.cloud.bigquery.storage.v1"; -option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; - -// Avro schema. -message AvroSchema { - // Json serialized schema, as described at - // https://avro.apache.org/docs/1.8.1/spec.html. - string schema = 1; -} - -// Avro rows. -message AvroRows { - // Binary serialized rows in a block. - bytes serialized_binary_rows = 1; - - // [Deprecated] The count of rows in the returning block. - // Please use the format-independent ReadRowsResponse.row_count instead. - int64 row_count = 2 [deprecated = true]; -} diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/protobuf.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/protobuf.proto deleted file mode 100644 index b3754acf..00000000 --- a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/protobuf.proto +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1; - -import "google/protobuf/descriptor.proto"; - -option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; -option java_multiple_files = true; -option java_outer_classname = "ProtoBufProto"; -option java_package = "com.google.cloud.bigquery.storage.v1"; -option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; - -// ProtoSchema describes the schema of the serialized protocol buffer data rows. -message ProtoSchema { - // Descriptor for input message. The provided descriptor must be self - // contained, such that data rows sent can be fully decoded using only the - // single descriptor. 
For data rows that are compositions of multiple - // independent messages, this means the descriptor may need to be transformed - // to only use nested types: - // https://developers.google.com/protocol-buffers/docs/proto#nested - // - // For additional information for how proto types and values map onto BigQuery - // see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions - google.protobuf.DescriptorProto proto_descriptor = 1; -} - -message ProtoRows { - // A sequence of rows serialized as a Protocol Buffer. - // - // See https://developers.google.com/protocol-buffers/docs/overview for more - // information on deserializing this field. - repeated bytes serialized_rows = 1; -} diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/storage.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/storage.proto deleted file mode 100644 index 67c6c8a0..00000000 --- a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/storage.proto +++ /dev/null @@ -1,624 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/cloud/bigquery/storage/v1/arrow.proto"; -import "google/cloud/bigquery/storage/v1/avro.proto"; -import "google/cloud/bigquery/storage/v1/protobuf.proto"; -import "google/cloud/bigquery/storage/v1/stream.proto"; -import "google/cloud/bigquery/storage/v1/table.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; -option java_multiple_files = true; -option java_outer_classname = "StorageProto"; -option java_package = "com.google.cloud.bigquery.storage.v1"; -option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; -option (google.api.resource_definition) = { - type: "bigquery.googleapis.com/Table" - pattern: "projects/{project}/datasets/{dataset}/tables/{table}" -}; - -// BigQuery Read API. -// -// The Read API can be used to read data from BigQuery. -service BigQueryRead { - option (google.api.default_host) = "bigquerystorage.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/bigquery," - "https://www.googleapis.com/auth/cloud-platform"; - - // Creates a new read session. A read session divides the contents of a - // BigQuery table into one or more streams, which can then be used to read - // data from the table. The read session also specifies properties of the - // data to be read, such as a list of columns or a push-down filter describing - // the rows to be returned. - // - // A particular row can be read by at most one stream. 
When the caller has - // reached the end of each stream in the session, then all the data in the - // table has been read. - // - // Data is assigned to each stream such that roughly the same number of - // rows can be read from each stream. Because the server-side unit for - // assigning data is collections of rows, the API does not guarantee that - // each stream will return the same number of rows. Additionally, the - // limits are enforced based on the number of pre-filtered rows, so some - // filters can lead to lopsided assignments. - // - // Read sessions automatically expire 6 hours after they are created and do - // not require manual clean-up by the caller. - rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) { - option (google.api.http) = { - post: "/v1/{read_session.table=projects/*/datasets/*/tables/*}" - body: "*" - }; - option (google.api.method_signature) = "parent,read_session,max_stream_count"; - } - - // Reads rows from the stream in the format prescribed by the ReadSession. - // Each response contains one or more table rows, up to a maximum of 100 MiB - // per response; read requests which attempt to read individual rows larger - // than 100 MiB will fail. - // - // Each request also returns a set of stream statistics reflecting the current - // state of the stream. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - option (google.api.http) = { - get: "/v1/{read_stream=projects/*/locations/*/sessions/*/streams/*}" - }; - option (google.api.method_signature) = "read_stream,offset"; - } - - // Splits a given `ReadStream` into two `ReadStream` objects. These - // `ReadStream` objects are referred to as the primary and the residual - // streams of the split. The original `ReadStream` can still be read from in - // the same manner as before. Both of the returned `ReadStream` objects can - // also be read from, and the rows returned by both child streams will be - // the same as the rows read from the original stream. - // - // Moreover, the two child streams will be allocated back-to-back in the - // original `ReadStream`. Concretely, it is guaranteed that for streams - // original, primary, and residual, that original[0-j] = primary[0-j] and - // original[j-n] = residual[0-m] once the streams have been read to - // completion. - rpc SplitReadStream(SplitReadStreamRequest) returns (SplitReadStreamResponse) { - option (google.api.http) = { - get: "/v1/{name=projects/*/locations/*/sessions/*/streams/*}" - }; - } -} - -// BigQuery Write API. -// -// The Write API can be used to write data to BigQuery. -// -// For supplementary information about the Write API, see: -// https://cloud.google.com/bigquery/docs/write-api -service BigQueryWrite { - option (google.api.default_host) = "bigquerystorage.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/bigquery," - "https://www.googleapis.com/auth/bigquery.insertdata," - "https://www.googleapis.com/auth/cloud-platform"; - - // Creates a write stream to the given table. - // Additionally, every table has a special stream named '_default' - // to which data can be written. This stream doesn't need to be created using - // CreateWriteStream. It is a stream that can be used simultaneously by any - // number of clients. Data written to this stream is considered committed as - // soon as an acknowledgement is received.
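Taken together, the `BigQueryRead` comments above describe the full read flow: create a session, then read every stream it returns. A minimal sketch of that flow, assuming illustrative project, dataset, and table ids:

```js
'use strict';

// Minimal read-flow sketch: create a session, then drain each stream.
// Resource names and the AVRO format choice are illustrative assumptions.
const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1;

async function readWholeTable() {
  const client = new BigQueryReadClient();
  const [session] = await client.createReadSession({
    parent: 'projects/my-project',
    readSession: {
      table: 'projects/my-project/datasets/my_dataset/tables/my_table',
      dataFormat: 'AVRO',
    },
    maxStreamCount: 2, // the server may return fewer streams
  });

  // All table data has been read once every stream has been read to its end.
  for (const {name} of session.streams) {
    await new Promise((resolve, reject) => {
      const rows = client.readRows({readStream: name});
      rows.on('data', response => console.log(response.rowCount));
      rows.on('error', reject);
      rows.on('end', resolve);
    });
  }
}

readWholeTable();
```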
- rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/datasets/*/tables/*}" - body: "write_stream" - }; - option (google.api.method_signature) = "parent,write_stream"; - } - - // Appends data to the given stream. - // - // If `offset` is specified, the `offset` is checked against the end of - // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an - // attempt is made to append to an offset beyond the current end of the stream - // or `ALREADY_EXISTS` if the user provides an `offset` that has already been - // written to. The user can retry with an adjusted offset within the same RPC - // connection. If `offset` is not specified, the append happens at the end of the - // stream. - // - // The response contains an optional offset at which the append - // happened. No offset information will be returned for appends to a - // default stream. - // - // Responses are received in the same order in which requests are sent. - // There will be one response for each successfully inserted request. Responses - // may optionally embed error information if the originating AppendRequest was - // not successfully processed. - // - // The specifics of when successfully appended data is made visible to the - // table are governed by the type of stream: - // - // * For COMMITTED streams (which includes the default stream), data is - // visible immediately upon successful append. - // - // * For BUFFERED streams, data is made visible via a subsequent `FlushRows` - // rpc which advances a cursor to a newer offset in the stream. - // - // * For PENDING streams, data is not made visible until the stream itself is - // finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly - // committed via the `BatchCommitWriteStreams` rpc. - // - // Note: For users coding against the gRPC API directly, it may be - // necessary to supply the x-goog-request-params system parameter - // with `write_stream=`. - // - // More information about system parameters: - // https://cloud.google.com/apis/docs/system-parameters - rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) { - option (google.api.http) = { - post: "/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}" - body: "*" - }; - option (google.api.method_signature) = "write_stream"; - } - - // Gets information about a write stream. - rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) { - option (google.api.http) = { - post: "/v1/{name=projects/*/datasets/*/tables/*/streams/*}" - body: "*" - }; - option (google.api.method_signature) = "name"; - } - - // Finalizes a write stream so that no new data can be appended to the - // stream. Finalize is not supported on the '_default' stream. - rpc FinalizeWriteStream(FinalizeWriteStreamRequest) returns (FinalizeWriteStreamResponse) { - option (google.api.http) = { - post: "/v1/{name=projects/*/datasets/*/tables/*/streams/*}" - body: "*" - }; - option (google.api.method_signature) = "name"; - } - - // Atomically commits a group of `PENDING` streams that belong to the same - // `parent` table. - // - // Streams must be finalized before commit and cannot be committed multiple - // times. Once a stream is committed, data in the stream becomes available - // for read operations.
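A minimal sketch of the PENDING lifecycle these comments describe: create a stream, append, finalize, then commit atomically. The parent table and row payload are illustrative, and building the proto descriptor is assumed to happen elsewhere:

```js
'use strict';

// PENDING write-stream lifecycle sketch; `protoDescriptor` and
// `serializedRows` are assumed to be built elsewhere.
const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1;

async function writePending(parent, protoDescriptor, serializedRows) {
  const client = new BigQueryWriteClient();
  const [writeStream] = await client.createWriteStream({
    parent, // projects/{project}/datasets/{dataset}/tables/{table}
    writeStream: {type: 'PENDING'},
  });

  const stream = await client.appendRows();
  const done = new Promise((resolve, reject) => {
    stream.on('data', response => console.log(response));
    stream.on('error', reject);
    stream.on('end', resolve);
  });
  stream.write({
    writeStream: writeStream.name, // required only on the first request
    protoRows: {writerSchema: {protoDescriptor}, rows: {serializedRows}},
  });
  stream.end();
  await done;

  // No further appends are accepted after finalize.
  await client.finalizeWriteStream({name: writeStream.name});

  // Commit is atomic across all listed streams.
  const [response] = await client.batchCommitWriteStreams({
    parent,
    writeStreams: [writeStream.name],
  });
  return response.commitTime;
}
```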
- rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest) returns (BatchCommitWriteStreamsResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/datasets/*/tables/*}" - }; - option (google.api.method_signature) = "parent"; - } - - // Flushes rows to a BUFFERED stream. - // - // If users are appending rows to a BUFFERED stream, a flush operation is - // required in order for the rows to become available for reading. A - // flush operation flushes rows in a BUFFERED stream from any previously - // flushed offset up to the offset specified in the request. - // - // Flush is not supported on the _default stream, since it is not BUFFERED. - rpc FlushRows(FlushRowsRequest) returns (FlushRowsResponse) { - option (google.api.http) = { - post: "/v1/{write_stream=projects/*/datasets/*/tables/*/streams/*}" - body: "*" - }; - option (google.api.method_signature) = "write_stream"; - } -} - -// Request message for `CreateReadSession`. -message CreateReadSessionRequest { - // Required. The request project that owns the session, in the form of - // `projects/{project_id}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Required. Session to be created. - ReadSession read_session = 2 [(google.api.field_behavior) = REQUIRED]; - - // Max initial number of streams. If unset or zero, the server will - // provide a value of streams so as to produce reasonable throughput. Must be - // non-negative. The number of streams may be lower than the requested number, - // depending on the amount of parallelism that is reasonable for the table. An - // error will be returned if the max count is greater than the current system - // max limit of 1,000. - // - // Streams must be read starting from offset 0. - int32 max_stream_count = 3; -} - -// Request message for `ReadRows`. -message ReadRowsRequest { - // Required. Stream to read rows from. - string read_stream = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/ReadStream" - } - ]; - - // The offset requested must be less than the last row read from Read. - // Requesting a larger offset is undefined. If not specified, start reading - // from offset zero. - int64 offset = 2; -} - -// Information on whether the current connection is being throttled. -message ThrottleState { - // How much this connection is being throttled. Zero means no throttling, - // 100 means fully throttled. - int32 throttle_percent = 1; -} - -// Estimated stream statistics for a given read stream. -message StreamStats { - message Progress { - // The fraction of rows assigned to the stream that have been processed by - // the server so far, not including the rows in the current response - // message. - // - // This value, along with `at_response_end`, can be used to interpolate - // the progress made as the rows in the message are being processed using - // the following formula: `at_response_start + (at_response_end - - // at_response_start) * rows_processed_from_response / rows_in_response`. - // - // Note that if a filter is provided, the `at_response_end` value of the - // previous response may not necessarily be equal to the - // `at_response_start` value of the current response. - double at_response_start = 1; - - // Similar to `at_response_start`, except that this value includes the - // rows in the current response.
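A worked instance of the interpolation formula above (the numbers are made up):

```js
// Interpolating within-response progress from at_response_start/end.
const atResponseStart = 0.25;
const atResponseEnd = 0.35;
const rowsInResponse = 1000;
const rowsProcessedFromResponse = 400;

const progress = atResponseStart +
  (atResponseEnd - atResponseStart) *
    (rowsProcessedFromResponse / rowsInResponse); // 0.25 + 0.10 * 0.4 = 0.29
```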
- double at_response_end = 2; - } - - // Represents the progress of the current stream. - Progress progress = 2; -} - -// A response from calling `ReadRows` may include row data, progress, and -// throttling information. -message ReadRowsResponse { - // Row data is returned in the format specified during session creation. - oneof rows { - // Serialized row data in AVRO format. - AvroRows avro_rows = 3; - - // Serialized row data in Arrow RecordBatch format. - ArrowRecordBatch arrow_record_batch = 4; - } - - // Number of serialized rows in the rows block. - int64 row_count = 6; - - // Statistics for the stream. - StreamStats stats = 2; - - // Throttling state. If unset, the latest response still describes - // the current throttling status. - ThrottleState throttle_state = 5; - - // The schema for the read. If read_options.selected_fields is set, the - // schema may be different from the table schema as it will only contain - // the selected fields. This schema is equivalent to the one returned by - // CreateReadSession. This field is only populated in the first ReadRowsResponse - // RPC. - oneof schema { - // Output only. Avro schema. - AvroSchema avro_schema = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Arrow schema. - ArrowSchema arrow_schema = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; - } -} - -// Request message for `SplitReadStream`. -message SplitReadStreamRequest { - // Required. Name of the stream to split. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/ReadStream" - } - ]; - - // A value in the range (0.0, 1.0) that specifies the fractional point at - // which the original stream should be split. The actual split point is - // evaluated on pre-filtered rows, so if a filter is provided, then there is - // no guarantee that the division of the rows between the new child streams - // will be proportional to this fractional value. Additionally, because the - // server-side unit for assigning data is collections of rows, this fraction - // will always map to a data storage boundary on the server side. - double fraction = 2; -} - -// Response message for `SplitReadStream`. -message SplitReadStreamResponse { - // Primary stream, which contains the beginning portion of - // |original_stream|. An empty value indicates that the original stream can no - // longer be split. - ReadStream primary_stream = 1; - - // Remainder stream, which contains the tail of |original_stream|. An empty - // value indicates that the original stream can no longer be split. - ReadStream remainder_stream = 2; -} - -// Request message for `CreateWriteStream`. -message CreateWriteStreamRequest { - // Required. Reference to the table to which the stream belongs, in the format - // of `projects/{project}/datasets/{dataset}/tables/{table}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquery.googleapis.com/Table" - } - ]; - - // Required. Stream to be created. - WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Request message for `AppendRows`. -// -// Due to the nature of AppendRows being a bidirectional streaming RPC, certain -// parts of the AppendRowsRequest need only be specified for the first request -// sent each time the gRPC network connection is opened/reopened. -message AppendRowsRequest { - // ProtoData contains the data rows and schema when constructing append - // requests.
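To satisfy the self-contained-descriptor requirement from protobuf.proto, a writer might derive the descriptor with protobufjs. This is a sketch under that assumption; the `sample_row.proto` file and message name are hypothetical:

```js
'use strict';

// Sketch: build ProtoData (writer schema + serialized rows) with protobufjs.
const protobuf = require('protobufjs');
require('protobufjs/ext/descriptor'); // adds .toDescriptor() to types

async function buildProtoData(rowObjects) {
  const root = await protobuf.load('sample_row.proto'); // hypothetical file
  const SampleRow = root.lookupType('sample.SampleRow'); // hypothetical type

  // toDescriptor('proto2') yields a DescriptorProto; nested message types
  // must already be declared as nested types of this single descriptor.
  const protoDescriptor = SampleRow.toDescriptor('proto2');

  return {
    writerSchema: {protoDescriptor},
    rows: {
      serializedRows: rowObjects.map(row =>
        SampleRow.encode(SampleRow.create(row)).finish()),
    },
  };
}
```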
- message ProtoData { - // Proto schema used to serialize the data. This value only needs to be - // provided as part of the first request on a gRPC network connection, - // and will be ignored for subsequent requests on the connection. - ProtoSchema writer_schema = 1; - - // Serialized row data in protobuf message format. - // Currently, the backend expects the serialized rows to adhere to - // proto2 semantics when appending rows, particularly with respect to - // how default values are encoded. - ProtoRows rows = 2; - } - - // Required. The write_stream identifies the target of the append operation, and only - // needs to be specified as part of the first request on the gRPC connection. - // If provided for subsequent requests, it must match the value of the first - // request. - // - // For explicitly created write streams, the format is: - // - // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}` - // - // For the special default stream, the format is: - // - // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`. - string write_stream = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/WriteStream" - } - ]; - - // If present, the write is only performed if the next append offset is the same - // as the provided value. If not present, the write is performed at the - // current end of stream. Specifying a value for this field is not allowed - // when calling AppendRows for the '_default' stream. - google.protobuf.Int64Value offset = 2; - - // Input rows. The `writer_schema` field must be specified in the initial - // request; currently, it is ignored if specified in subsequent - // requests. Subsequent requests must have data in the same format as the - // initial request. - oneof rows { - // Rows in proto format. - ProtoData proto_rows = 4; - } - - // ID set by the client to annotate its identity. Only the setting in the - // initial request is respected. - string trace_id = 6; -} - -// Response message for `AppendRows`. -message AppendRowsResponse { - // AppendResult is returned for successful append requests. - message AppendResult { - // The row offset at which the last append occurred. The offset will not be - // set if appending using default streams. - google.protobuf.Int64Value offset = 1; - } - - oneof response { - // Result if the append is successful. - AppendResult append_result = 1; - - // Error returned when problems were encountered. If present, - // it indicates rows were not accepted into the system. - // Users can retry or continue with other append requests within the - // same connection. - // - // Additional information about error signalling: - // - // ALREADY_EXISTS: Happens when an append specified an offset, and the - // backend already has received data at this offset. Typically encountered - // in retry scenarios, and can be ignored. - // - // OUT_OF_RANGE: Returned when the specified offset in the stream is beyond - // the current end of the stream. - // - // INVALID_ARGUMENT: Indicates a malformed request or data. - // - // ABORTED: Request processing is aborted because of prior failures. The - // request can be retried if the previous failure is addressed. - // - // INTERNAL: Indicates server-side error(s) that can be retried. - google.rpc.Status error = 2; - } - - // If the backend detects a schema update, it is passed to the user so that - // the user can begin sending messages of the new type. It will be empty when - // no schema updates have occurred.
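A sketch of handling these responses on the client side, covering the `append_result`/`error` oneof and `updated_schema`; the `stream` duplex is assumed to come from an earlier `appendRows()` call:

```js
// Handling AppendRowsResponse messages; `stream` is an appendRows() duplex.
stream.on('data', response => {
  if (response.error) {
    // Rows were not accepted; ALREADY_EXISTS is often benign in retries.
    console.error('append failed:', response.error.message);
  } else if (response.appendResult) {
    // offset is unset for appends to the default stream.
    console.log('appended at offset:', response.appendResult.offset);
  }
  if (response.updatedSchema) {
    // The backend saw a schema update; new message types may be sent.
    console.log('updated schema fields:', response.updatedSchema.fields.length);
  }
});
```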
- TableSchema updated_schema = 3; -} - -// Request message for `GetWriteStream`. -message GetWriteStreamRequest { - // Required. Name of the stream to get, in the form of - // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/WriteStream" - } - ]; -} - -// Request message for `BatchCommitWriteStreams`. -message BatchCommitWriteStreamsRequest { - // Required. Parent table that all the streams should belong to, in the form of - // `projects/{project}/datasets/{dataset}/tables/{table}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquery.googleapis.com/Table" - } - ]; - - // Required. The group of streams that will be committed atomically. - repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Response message for `BatchCommitWriteStreams`. -message BatchCommitWriteStreamsResponse { - // The time at which streams were committed, with microsecond granularity. - // This field will only exist when there are no stream errors. - // **Note:** if this field is not set, it means the commit was not successful. - google.protobuf.Timestamp commit_time = 1; - - // Stream-level error if commit failed. Only streams with errors will be in - // the list. - // If empty, there is no error and all streams are committed successfully. - // If non-empty, certain streams have errors and zero streams are committed due - // to the atomicity guarantee. - repeated StorageError stream_errors = 2; -} - -// Request message for invoking `FinalizeWriteStream`. -message FinalizeWriteStreamRequest { - // Required. Name of the stream to finalize, in the form of - // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/WriteStream" - } - ]; -} - -// Response message for `FinalizeWriteStream`. -message FinalizeWriteStreamResponse { - // Number of rows in the finalized stream. - int64 row_count = 1; -} - -// Request message for `FlushRows`. -message FlushRowsRequest { - // Required. The stream that is the target of the flush operation. - string write_stream = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/WriteStream" - } - ]; - - // Ending offset of the flush operation. Rows before this offset (including - // this offset) will be flushed. - google.protobuf.Int64Value offset = 2; -} - -// Response message for `FlushRows`. -message FlushRowsResponse { - // The rows before this offset (including this offset) are flushed. - int64 offset = 1; -} - -// Structured custom BigQuery Storage error message. The error can be attached -// as error details in the returned rpc Status. In particular, the use of error -// codes allows more structured error handling, and reduces the need to evaluate -// unstructured error text strings. -message StorageError { - // Error code for `StorageError`. - enum StorageErrorCode { - // Default error. - STORAGE_ERROR_CODE_UNSPECIFIED = 0; - - // Table is not found in the system. - TABLE_NOT_FOUND = 1; - - // Stream is already committed. - STREAM_ALREADY_COMMITTED = 2; - - // Stream is not found. - STREAM_NOT_FOUND = 3; - - // Invalid stream type. - // For example, you try to commit a stream that is not pending.
- INVALID_STREAM_TYPE = 4; - - // Invalid stream state. - // For example, you try to commit a stream that is not finalized or has - // been garbage collected. - INVALID_STREAM_STATE = 5; - - // Stream is finalized. - STREAM_FINALIZED = 6; - - // There is a schema mismatch: the user schema has an extra field that is - // not present in the BigQuery schema. - SCHEMA_MISMATCH_EXTRA_FIELDS = 7; - - // Offset already exists. - OFFSET_ALREADY_EXISTS = 8; - - // Offset out of range. - OFFSET_OUT_OF_RANGE = 9; - } - - // BigQuery Storage-specific error code. - StorageErrorCode code = 1; - - // Name of the failed entity. - string entity = 2; - - // Message that describes the error. - string error_message = 3; -} diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/stream.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/stream.proto deleted file mode 100644 index bd1fa2ce..00000000 --- a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/stream.proto +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1; - -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/cloud/bigquery/storage/v1/arrow.proto"; -import "google/cloud/bigquery/storage/v1/avro.proto"; -import "google/cloud/bigquery/storage/v1/table.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; -option java_multiple_files = true; -option java_outer_classname = "StreamProto"; -option java_package = "com.google.cloud.bigquery.storage.v1"; -option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; - -// Data format for input or output data. -enum DataFormat { - DATA_FORMAT_UNSPECIFIED = 0; - - // Avro is a standard open source row-based file format. - // See https://avro.apache.org/ for more details. - AVRO = 1; - - // Arrow is a standard open source column-based message format. - // See https://arrow.apache.org/ for more details. - ARROW = 2; -} - -// Information about the ReadSession. -message ReadSession { - option (google.api.resource) = { - type: "bigquerystorage.googleapis.com/ReadSession" - pattern: "projects/{project}/locations/{location}/sessions/{session}" - }; - - // Additional attributes when reading a table. - message TableModifiers { - // The snapshot time of the table. If not set, interpreted as now. - google.protobuf.Timestamp snapshot_time = 1; - } - - // Options dictating how we read a table. - message TableReadOptions { - // Names of the fields in the table that should be read. If empty, all - // fields will be read. If the specified field is a nested field, all - // the sub-fields in the field will be selected. The output field order is - // unrelated to the order of fields in selected_fields.
- repeated string selected_fields = 1; - - // SQL text filtering statement, similar to a WHERE clause in a query. - // Aggregates are not supported. - // - // Examples: "int_field > 5" - // "date_field = CAST('2014-9-27' as DATE)" - // "nullable_field is not NULL" - // "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" - // "numeric_field BETWEEN 1.0 AND 5.0" - // - // Restricted to a maximum length of 1 MB. - string row_restriction = 2; - - oneof output_format_serialization_options { - // Optional. Options specific to the Apache Arrow output format. - ArrowSerializationOptions arrow_serialization_options = 3 [(google.api.field_behavior) = OPTIONAL]; - } - } - - // Output only. Unique identifier for the session, in the form - // `projects/{project_id}/locations/{location}/sessions/{session_id}`. - string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Time at which the session becomes invalid. After this time, subsequent - // requests to read this session will return errors. The expire_time is - // automatically assigned and currently cannot be specified or updated. - google.protobuf.Timestamp expire_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Immutable. Data format of the output data. - DataFormat data_format = 3 [(google.api.field_behavior) = IMMUTABLE]; - - // The schema for the read. If read_options.selected_fields is set, the - // schema may be different from the table schema as it will only contain - // the selected fields. - oneof schema { - // Output only. Avro schema. - AvroSchema avro_schema = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Arrow schema. - ArrowSchema arrow_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; - } - - // Immutable. Table that this ReadSession is reading from, in the form - // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}` - string table = 6 [ - (google.api.field_behavior) = IMMUTABLE, - (google.api.resource_reference) = { - type: "bigquery.googleapis.com/Table" - } - ]; - - // Optional. Any modifiers which are applied when reading from the specified table. - TableModifiers table_modifiers = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Read options for this session (e.g. column selection, filters). - TableReadOptions read_options = 8 [(google.api.field_behavior) = OPTIONAL]; - - // Output only. A list of streams created with the session. - // - // At least one stream is created with the session. In the future, larger - // request_stream_count values *may* result in this list being unpopulated, - // in which case the user will need to use a List method to get the streams - // instead, which is not yet available. - repeated ReadStream streams = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. An estimate of the number of bytes this session will scan when - // all streams are completely consumed. This estimate is based on - // metadata from the table which might be incomplete or stale. - int64 estimated_total_bytes_scanned = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. ID set by client to annotate a session identity. This does not need - // to be strictly unique, but instead the same ID should be used to group - // logically connected sessions (e.g., using the same ID for all sessions - // needed to complete a Spark SQL query is reasonable). - // - // Maximum length is 256 bytes.
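For illustration, a `ReadSession` combining the read options above, column selection plus a row restriction (all names and values are invented):

```js
// Sketch of ReadSession read_options usage; names and values illustrative.
const readSession = {
  table: 'projects/my-project/datasets/my_dataset/tables/my_table',
  dataFormat: 'AVRO',
  readOptions: {
    // Only these columns are read; a nested field selects all sub-fields.
    selectedFields: ['int_field', 'date_field'],
    // Like a WHERE clause; aggregates are not supported.
    rowRestriction: "int_field > 5 AND date_field = CAST('2014-9-27' as DATE)",
  },
};
```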
- string trace_id = 13 [(google.api.field_behavior) = OPTIONAL]; -} - -// Information about a single stream that gets data out of the storage system. -// Most of the information about `ReadStream` instances is aggregated, making -// `ReadStream` lightweight. -message ReadStream { - option (google.api.resource) = { - type: "bigquerystorage.googleapis.com/ReadStream" - pattern: "projects/{project}/locations/{location}/sessions/{session}/streams/{stream}" - }; - - // Output only. Name of the stream, in the form - // `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`. - string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Information about a single stream that gets data inside the storage system. -message WriteStream { - option (google.api.resource) = { - type: "bigquerystorage.googleapis.com/WriteStream" - pattern: "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}" - }; - - // Type enum of the stream. - enum Type { - // Unknown type. - TYPE_UNSPECIFIED = 0; - - // Data will commit automatically and appear as soon as the write is - // acknowledged. - COMMITTED = 1; - - // Data is invisible until the stream is committed. - PENDING = 2; - - // Data is only visible up to the offset to which it was flushed. - BUFFERED = 3; - } - - // Mode enum of the stream. - enum WriteMode { - // Unknown type. - WRITE_MODE_UNSPECIFIED = 0; - - // Insert new records into the table. - // It is the default value if customers do not specify it. - INSERT = 1; - } - - // Output only. Name of the stream, in the form - // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Immutable. Type of the stream. - Type type = 2 [(google.api.field_behavior) = IMMUTABLE]; - - // Output only. Create time of the stream. For the _default stream, this is the - // creation_time of the table. - google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Commit time of the stream. - // If a stream is of `COMMITTED` type, then it will have a commit_time same as - // `create_time`. If the stream is of `PENDING` type, empty commit_time - // means it is not committed. - google.protobuf.Timestamp commit_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The schema of the destination table. It is only returned in - // `CreateWriteStream` response. Caller should generate data that's - // compatible with this schema to send in initial `AppendRowsRequest`. - // The table schema could go out of date during the life time of the stream. - TableSchema table_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Immutable. Mode of the stream. - WriteMode write_mode = 7 [(google.api.field_behavior) = IMMUTABLE]; -} diff --git a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/table.proto b/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/table.proto deleted file mode 100644 index 545f6292..00000000 --- a/owl-bot-staging/v1/protos/google/cloud/bigquery/storage/v1/table.proto +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1; - -import "google/api/field_behavior.proto"; - -option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1;storage"; -option java_multiple_files = true; -option java_outer_classname = "TableProto"; -option java_package = "com.google.cloud.bigquery.storage.v1"; -option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; - -// Schema of a table. -message TableSchema { - // Describes the fields in a table. - repeated TableFieldSchema fields = 1; -} - -// TableFieldSchema defines a single field/column within a table schema. -message TableFieldSchema { - enum Type { - // Illegal value - TYPE_UNSPECIFIED = 0; - - // 64K, UTF8 - STRING = 1; - - // 64-bit signed - INT64 = 2; - - // 64-bit IEEE floating point - DOUBLE = 3; - - // Aggregate type - STRUCT = 4; - - // 64K, Binary - BYTES = 5; - - // 2-valued - BOOL = 6; - - // 64-bit signed usec since UTC epoch - TIMESTAMP = 7; - - // Civil date - Year, Month, Day - DATE = 8; - - // Civil time - Hour, Minute, Second, Microseconds - TIME = 9; - - // Combination of civil date and civil time - DATETIME = 10; - - // Geography object - GEOGRAPHY = 11; - - // Numeric value - NUMERIC = 12; - - // BigNumeric value - BIGNUMERIC = 13; - - // Interval - INTERVAL = 14; - - // JSON, String - JSON = 15; - } - - enum Mode { - // Illegal value - MODE_UNSPECIFIED = 0; - - NULLABLE = 1; - - REQUIRED = 2; - - REPEATED = 3; - } - - // Required. The field name. The name must contain only letters (a-z, A-Z), - // numbers (0-9), or underscores (_), and must start with a letter or - // underscore. The maximum length is 128 characters. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The field data type. - Type type = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The field mode. The default value is NULLABLE. - Mode mode = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Describes the nested schema fields if the type property is set to STRUCT. - repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The field description. The maximum length is 1,024 characters. - string description = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Maximum length of values of this field for STRINGS or BYTES. - // - // If max_length is not specified, no maximum length constraint is imposed - // on this field. - // - // If type = "STRING", then max_length represents the maximum UTF-8 - // length of strings in this field. - // - // If type = "BYTES", then max_length represents the maximum number of - // bytes in this field. - // - // It is invalid to set this field if type is not "STRING" or "BYTES". - int64 max_length = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Precision (maximum number of total digits in base 10) and scale - // (maximum number of digits in the fractional part in base 10) constraints - // for values of this field for NUMERIC or BIGNUMERIC. 
- // - // It is invalid to set precision or scale if type is not "NUMERIC" or - // "BIGNUMERIC". - // - // If precision and scale are not specified, no value range constraint is - // imposed on this field insofar as values are permitted by the type. - // - // Values of this NUMERIC or BIGNUMERIC field must be in this range when: - // - // * Precision (P) and scale (S) are specified: - // [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)] - // * Precision (P) is specified but not scale (and thus scale is - // interpreted to be equal to zero): - // [-10^P + 1, 10^P - 1]. - // - // Acceptable values for precision and scale if both are specified: - // - // * If type = "NUMERIC": - // 1 <= precision - scale <= 29 and 0 <= scale <= 9. - // * If type = "BIGNUMERIC": - // 1 <= precision - scale <= 38 and 0 <= scale <= 38. - // - // Acceptable values for precision if only precision is specified but not - // scale (and thus scale is interpreted to be equal to zero): - // - // * If type = "NUMERIC": 1 <= precision <= 29. - // * If type = "BIGNUMERIC": 1 <= precision <= 38. - // - // If scale is specified but not precision, then it is invalid. - int64 precision = 8 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. See documentation for precision. - int64 scale = 9 [(google.api.field_behavior) = OPTIONAL]; -} diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_read.create_read_session.js b/owl-bot-staging/v1/samples/generated/v1/big_query_read.create_read_session.js deleted file mode 100644 index f4d2f950..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/big_query_read.create_read_session.js +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(parent, readSession) { - // [START bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The request project that owns the session, in the form of - * `projects/{project_id}`. - */ - // const parent = 'abc123' - /** - * Required. Session to be created. - */ - // const readSession = {} - /** - * Max initial number of streams. If unset or zero, the server will - * provide a value of streams so as to produce reasonable throughput. Must be - * non-negative. The number of streams may be lower than the requested number, - * depending on the amount parallelism that is reasonable for the table. Error - * will be returned if the max count is greater than the current system - * max limit of 1,000. - * Streams must be read starting from offset 0. 
- */ - // const maxStreamCount = 1234 - - // Imports the Storage library - const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1; - - // Instantiates a client - const storageClient = new BigQueryReadClient(); - - async function callCreateReadSession() { - // Construct request - const request = { - parent, - readSession, - }; - - // Run request - const response = await storageClient.createReadSession(request); - console.log(response); - } - - callCreateReadSession(); - // [END bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_read.read_rows.js b/owl-bot-staging/v1/samples/generated/v1/big_query_read.read_rows.js deleted file mode 100644 index f5f781a4..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/big_query_read.read_rows.js +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(readStream) { - // [START bigquerystorage_v1_generated_BigQueryRead_ReadRows_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Stream to read rows from. - */ - // const readStream = 'abc123' - /** - * The offset requested must be less than the last row read from Read. - * Requesting a larger offset is undefined. If not specified, start reading - * from offset zero. 
- */ - // const offset = 1234 - - // Imports the Storage library - const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1; - - // Instantiates a client - const storageClient = new BigQueryReadClient(); - - async function callReadRows() { - // Construct request - const request = { - readStream, - }; - - // Run request - const stream = await storageClient.readRows(request); - stream.on('data', (response) => { console.log(response) }); - stream.on('error', (err) => { throw(err) }); - stream.on('end', () => { /* API call completed */ }); - } - - callReadRows(); - // [END bigquerystorage_v1_generated_BigQueryRead_ReadRows_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_read.split_read_stream.js b/owl-bot-staging/v1/samples/generated/v1/big_query_read.split_read_stream.js deleted file mode 100644 index 6e146955..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/big_query_read.split_read_stream.js +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(name) { - // [START bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Name of the stream to split. - */ - // const name = 'abc123' - /** - * A value in the range (0.0, 1.0) that specifies the fractional point at - * which the original stream should be split. The actual split point is - * evaluated on pre-filtered rows, so if a filter is provided, then there is - * no guarantee that the division of the rows between the new child streams - * will be proportional to this fractional value. Additionally, because the - * server-side unit for assigning data is collections of rows, this fraction - * will always map to a data storage boundary on the server side. 
- */ - // const fraction = 1234 - - // Imports the Storage library - const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1; - - // Instantiates a client - const storageClient = new BigQueryReadClient(); - - async function callSplitReadStream() { - // Construct request - const request = { - name, - }; - - // Run request - const response = await storageClient.splitReadStream(request); - console.log(response); - } - - callSplitReadStream(); - // [END bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.append_rows.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.append_rows.js deleted file mode 100644 index 9cefbb22..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/big_query_write.append_rows.js +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(writeStream) { - // [START bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The write_stream identifies the target of the append operation, and only - * needs to be specified as part of the first request on the gRPC connection. - * If provided for subsequent requests, it must match the value of the first - * request. - * For explicitly created write streams, the format is: - * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}` - * For the special default stream, the format is: - * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`. - */ - // const writeStream = 'abc123' - /** - * If present, the write is only performed if the next append offset is same - * as the provided value. If not present, the write is performed at the - * current end of stream. Specifying a value for this field is not allowed - * when calling AppendRows for the '_default' stream. - */ - // const offset = {} - /** - * Rows in proto format. - */ - // const protoRows = {} - /** - * Id set by client to annotate its identity. Only initial request setting is - * respected. 
- */ - // const traceId = 'abc123' - - // Imports the Storage library - const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1; - - // Instantiates a client - const storageClient = new BigQueryWriteClient(); - - async function callAppendRows() { - // Construct request - const request = { - writeStream, - }; - - // Run request - const stream = await storageClient.appendRows(); - stream.on('data', (response) => { console.log(response) }); - stream.on('error', (err) => { throw(err) }); - stream.on('end', () => { /* API call completed */ }); - stream.write(request); - stream.end(); - } - - callAppendRows(); - // [END bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.batch_commit_write_streams.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.batch_commit_write_streams.js deleted file mode 100644 index 9c258a30..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/big_query_write.batch_commit_write_streams.js +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(parent, writeStreams) { - // [START bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Parent table that all the streams should belong to, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}`. - */ - // const parent = 'abc123' - /** - * Required. The group of streams that will be committed atomically. 
- */ - // const writeStreams = 'abc123' - - // Imports the Storage library - const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1; - - // Instantiates a client - const storageClient = new BigQueryWriteClient(); - - async function callBatchCommitWriteStreams() { - // Construct request - const request = { - parent, - writeStreams, - }; - - // Run request - const response = await storageClient.batchCommitWriteStreams(request); - console.log(response); - } - - callBatchCommitWriteStreams(); - // [END bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.create_write_stream.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.create_write_stream.js deleted file mode 100644 index 839a8bf6..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/big_query_write.create_write_stream.js +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(parent, writeStream) { - // [START bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Reference to the table to which the stream belongs, in the format - * of `projects/{project}/datasets/{dataset}/tables/{table}`. - */ - // const parent = 'abc123' - /** - * Required. Stream to be created. - */ - // const writeStream = {} - - // Imports the Storage library - const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1; - - // Instantiates a client - const storageClient = new BigQueryWriteClient(); - - async function callCreateWriteStream() { - // Construct request - const request = { - parent, - writeStream, - }; - - // Run request - const response = await storageClient.createWriteStream(request); - console.log(response); - } - - callCreateWriteStream(); - // [END bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.finalize_write_stream.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.finalize_write_stream.js deleted file mode 100644 index 3ff3da7a..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/big_query_write.finalize_write_stream.js +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(name) { - // [START bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Name of the stream to finalize, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - */ - // const name = 'abc123' - - // Imports the Storage library - const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1; - - // Instantiates a client - const storageClient = new BigQueryWriteClient(); - - async function callFinalizeWriteStream() { - // Construct request - const request = { - name, - }; - - // Run request - const response = await storageClient.finalizeWriteStream(request); - console.log(response); - } - - callFinalizeWriteStream(); - // [END bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.flush_rows.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.flush_rows.js deleted file mode 100644 index 751fdebf..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/big_query_write.flush_rows.js +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(writeStream) { - // [START bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. The stream that is the target of the flush operation. - */ - // const writeStream = 'abc123' - /** - * Ending offset of the flush operation. Rows before this offset(including - * this offset) will be flushed. 
- */ - // const offset = {} - - // Imports the Storage library - const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1; - - // Instantiates a client - const storageClient = new BigQueryWriteClient(); - - async function callFlushRows() { - // Construct request - const request = { - writeStream, - }; - - // Run request - const response = await storageClient.flushRows(request); - console.log(response); - } - - callFlushRows(); - // [END bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/big_query_write.get_write_stream.js b/owl-bot-staging/v1/samples/generated/v1/big_query_write.get_write_stream.js deleted file mode 100644 index 16507508..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/big_query_write.get_write_stream.js +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(name) { - // [START bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Name of the stream to get, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. 
- */ - // const name = 'abc123' - - // Imports the Storage library - const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1; - - // Instantiates a client - const storageClient = new BigQueryWriteClient(); - - async function callGetWriteStream() { - // Construct request - const request = { - name, - }; - - // Run request - const response = await storageClient.getWriteStream(request); - console.log(response); - } - - callGetWriteStream(); - // [END bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json b/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json deleted file mode 100644 index a847f738..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json +++ /dev/null @@ -1,415 +0,0 @@ -{ - "clientLibrary": { - "name": "nodejs-storage", - "version": "0.1.0", - "language": "TYPESCRIPT", - "apis": [ - { - "id": "google.cloud.bigquery.storage.v1", - "version": "v1" - } - ] - }, - "snippets": [ - { - "regionTag": "bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async", - "title": "BigQueryRead createReadSession Sample", - "origin": "API_DEFINITION", - "description": " Creates a new read session. A read session divides the contents of a BigQuery table into one or more streams, which can then be used to read data from the table. The read session also specifies properties of the data to be read, such as a list of columns or a push-down filter describing the rows to be returned. A particular row can be read by at most one stream. When the caller has reached the end of each stream in the session, then all the data in the table has been read. Data is assigned to each stream such that roughly the same number of rows can be read from each stream. Because the server-side unit for assigning data is collections of rows, the API does not guarantee that each stream will return the same number of rows. Additionally, the limits are enforced based on the number of pre-filtered rows, so some filters can lead to lopsided assignments.
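To ground the session description just quoted, here is a hedged sketch of calling createReadSession from TypeScript; the project, dataset, and table identifiers are placeholders, and AVRO is just one of the supported data formats.

import {BigQueryReadClient} from '@google-cloud/bigquery-storage';

async function openReadSession(projectId: string, datasetId: string, tableId: string) {
  const client = new BigQueryReadClient();
  const [session] = await client.createReadSession({
    parent: `projects/${projectId}`,
    readSession: {
      table: client.tablePath(projectId, datasetId, tableId),
      dataFormat: 'AVRO', // or 'ARROW'
    },
    maxStreamCount: 4, // the server may assign fewer streams
  });
  // session.streams lists the ReadStream names to pass to readRows.
  return session;
}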
Read sessions automatically expire 6 hours after they are created and do not require manual clean-up by the caller.", - "canonical": true, - "file": "big_query_read.create_read_session.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 66, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "CreateReadSession", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.CreateReadSession", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "read_session", - "type": ".google.cloud.bigquery.storage.v1.ReadSession" - }, - { - "name": "max_stream_count", - "type": "TYPE_INT32" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1.ReadSession", - "client": { - "shortName": "BigQueryReadClient", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryReadClient" - }, - "method": { - "shortName": "CreateReadSession", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.CreateReadSession", - "service": { - "shortName": "BigQueryRead", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1_generated_BigQueryRead_ReadRows_async", - "title": "BigQueryRead readRows Sample", - "origin": "API_DEFINITION", - "description": " Reads rows from the stream in the format prescribed by the ReadSession. Each response contains one or more table rows, up to a maximum of 100 MiB per response; read requests which attempt to read individual rows larger than 100 MiB will fail. Each request also returns a set of stream statistics reflecting the current state of the stream.", - "canonical": true, - "file": "big_query_read.read_rows.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 58, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "ReadRows", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.ReadRows", - "async": true, - "parameters": [ - { - "name": "read_stream", - "type": "TYPE_STRING" - }, - { - "name": "offset", - "type": "TYPE_INT64" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1.ReadRowsResponse", - "client": { - "shortName": "BigQueryReadClient", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryReadClient" - }, - "method": { - "shortName": "ReadRows", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.ReadRows", - "service": { - "shortName": "BigQueryRead", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async", - "title": "BigQueryRead splitReadStream Sample", - "origin": "API_DEFINITION", - "description": " Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are referred to as the primary and the residual streams of the split. The original `ReadStream` can still be read from in the same manner as before. Both of the returned `ReadStream` objects can also be read from, and the rows returned by both child streams will be the same as the rows read from the original stream. Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`. 
Concretely, it is guaranteed that for streams original, primary, and residual, that original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read to completion.", - "canonical": true, - "file": "big_query_read.split_read_stream.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 60, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "SplitReadStream", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.SplitReadStream", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - }, - { - "name": "fraction", - "type": "TYPE_DOUBLE" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1.SplitReadStreamResponse", - "client": { - "shortName": "BigQueryReadClient", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryReadClient" - }, - "method": { - "shortName": "SplitReadStream", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.SplitReadStream", - "service": { - "shortName": "BigQueryRead", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async", - "title": "BigQueryRead createWriteStream Sample", - "origin": "API_DEFINITION", - "description": " Creates a write stream to the given table. Additionally, every table has a special stream named '_default' to which data can be written. This stream doesn't need to be created using CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. Data written to this stream is considered committed as soon as an acknowledgement is received.", - "canonical": true, - "file": "big_query_write.create_write_stream.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 56, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "CreateWriteStream", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.CreateWriteStream", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "write_stream", - "type": ".google.cloud.bigquery.storage.v1.WriteStream" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1.WriteStream", - "client": { - "shortName": "BigQueryWriteClient", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" - }, - "method": { - "shortName": "CreateWriteStream", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.CreateWriteStream", - "service": { - "shortName": "BigQueryWrite", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async", - "title": "BigQueryRead appendRows Sample", - "origin": "API_DEFINITION", - "description": " Appends data to the given stream. If `offset` is specified, the `offset` is checked against the end of stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an attempt is made to append to an offset beyond the current end of the stream or `ALREADY_EXISTS` if user provides an `offset` that has already been written to. User can retry with adjusted offset within the same RPC connection. If `offset` is not specified, append happens at the end of the stream. The response contains an optional offset at which the append happened. No offset information will be returned for appends to a default stream. Responses are received in the same order in which requests are sent. There will be one response for each successful inserted request. 
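Because AppendRows is a bidirectional stream, the generated client method returns a duplex stream rather than a promise: requests are written to it, and responses (one per request, in request order, as described above) arrive as 'data' events. A minimal sketch, assuming the rows are already serialized against a writer schema that is omitted here as a placeholder:

import {BigQueryWriteClient} from '@google-cloud/bigquery-storage';

function appendSerializedRows(writeStream: string, serializedRows: Uint8Array[]) {
  const client = new BigQueryWriteClient();
  const stream = client.appendRows(); // bidi duplex stream
  stream.on('data', (response) => {
    // Embedded row errors, if any, also arrive on this event.
    console.log('append offset:', response.appendResult?.offset);
  });
  stream.on('error', console.error);
  stream.on('end', () => void client.close());
  stream.write({
    writeStream,
    // protoRows.writerSchema (an omitted placeholder here) must describe
    // the serialized messages; see AppendRowsRequest.ProtoData.
    protoRows: {rows: {serializedRows}},
  });
  stream.end();
}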
Responses may optionally embed error information if the originating AppendRequest was not successfully processed. The specifics of when successfully appended data is made visible to the table are governed by the type of stream: * For COMMITTED streams (which includes the default stream), data is visible immediately upon successful append. * For BUFFERED streams, data is made visible via a subsequent `FlushRows` rpc which advances a cursor to a newer offset in the stream. * For PENDING streams, data is not made visible until the stream itself is finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly committed via the `BatchCommitWriteStreams` rpc. Note: For users coding against the gRPC api directly, it may be necessary to supply the x-goog-request-params system parameter with `write_stream=`. More information about system parameters: https://cloud.google.com/apis/docs/system-parameters", - "canonical": true, - "file": "big_query_write.append_rows.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 77, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "AppendRows", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.AppendRows", - "async": true, - "parameters": [ - { - "name": "write_stream", - "type": "TYPE_STRING" - }, - { - "name": "offset", - "type": ".google.protobuf.Int64Value" - }, - { - "name": "proto_rows", - "type": ".google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData" - }, - { - "name": "trace_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1.AppendRowsResponse", - "client": { - "shortName": "BigQueryWriteClient", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" - }, - "method": { - "shortName": "AppendRows", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.AppendRows", - "service": { - "shortName": "BigQueryWrite", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async", - "title": "BigQueryRead getWriteStream Sample", - "origin": "API_DEFINITION", - "description": " Gets information about a write stream.", - "canonical": true, - "file": "big_query_write.get_write_stream.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 51, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "GetWriteStream", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.GetWriteStream", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1.WriteStream", - "client": { - "shortName": "BigQueryWriteClient", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" - }, - "method": { - "shortName": "GetWriteStream", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.GetWriteStream", - "service": { - "shortName": "BigQueryWrite", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async", - "title": "BigQueryRead finalizeWriteStream Sample", - "origin": "API_DEFINITION", - "description": " Finalize a write stream so that no new data can be appended to the stream. 
Finalize is not supported on the '_default' stream.", - "canonical": true, - "file": "big_query_write.finalize_write_stream.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 51, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "FinalizeWriteStream", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FinalizeWriteStream", - "async": true, - "parameters": [ - { - "name": "name", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse", - "client": { - "shortName": "BigQueryWriteClient", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" - }, - "method": { - "shortName": "FinalizeWriteStream", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FinalizeWriteStream", - "service": { - "shortName": "BigQueryWrite", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async", - "title": "BigQueryRead batchCommitWriteStreams Sample", - "origin": "API_DEFINITION", - "description": " Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams must be finalized before commit and cannot be committed multiple times. Once a stream is committed, data in the stream becomes available for read operations.", - "canonical": true, - "file": "big_query_write.batch_commit_write_streams.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 56, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "BatchCommitWriteStreams", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.BatchCommitWriteStreams", - "async": true, - "parameters": [ - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "write_streams", - "type": "TYPE_STRING[]" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse", - "client": { - "shortName": "BigQueryWriteClient", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" - }, - "method": { - "shortName": "BatchCommitWriteStreams", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.BatchCommitWriteStreams", - "service": { - "shortName": "BigQueryWrite", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async", - "title": "BigQueryRead flushRows Sample", - "origin": "API_DEFINITION", - "description": " Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush operation is required in order for the rows to become available for reading. A Flush operation flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in the request. 
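Taken together, the write RPCs above form the PENDING workflow: create a stream, append to it, finalize it, then commit. A hedged sketch under the same assumptions as the earlier examples (the parent table path is a placeholder, and the append step is elided):

import {BigQueryWriteClient} from '@google-cloud/bigquery-storage';

async function commitPendingStream(parent: string): Promise<void> {
  const client = new BigQueryWriteClient();
  // 1. PENDING: appended rows stay invisible until the commit below.
  const [stream] = await client.createWriteStream({
    parent,
    writeStream: {type: 'PENDING'},
  });
  // ... append rows to stream.name via appendRows() here ...
  // 2. Finalize: the stream stops accepting new appends.
  await client.finalizeWriteStream({name: stream.name});
  // 3. Commit atomically; the data becomes available for reads.
  const [commit] = await client.batchCommitWriteStreams({
    parent,
    writeStreams: [stream.name!],
  });
  console.log('committed at', commit.commitTime);
  await client.close();
}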
Flush is not supported on the _default stream, since it is not BUFFERED.", - "canonical": true, - "file": "big_query_write.flush_rows.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 55, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "FlushRows", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FlushRows", - "async": true, - "parameters": [ - { - "name": "write_stream", - "type": "TYPE_STRING" - }, - { - "name": "offset", - "type": ".google.protobuf.Int64Value" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1.FlushRowsResponse", - "client": { - "shortName": "BigQueryWriteClient", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWriteClient" - }, - "method": { - "shortName": "FlushRows", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FlushRows", - "service": { - "shortName": "BigQueryWrite", - "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite" - } - } - } - } - ] -} diff --git a/owl-bot-staging/v1/src/index.ts b/owl-bot-staging/v1/src/index.ts deleted file mode 100644 index b0d739d4..00000000 --- a/owl-bot-staging/v1/src/index.ts +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as v1 from './v1'; -const BigQueryReadClient = v1.BigQueryReadClient; -type BigQueryReadClient = v1.BigQueryReadClient; -const BigQueryWriteClient = v1.BigQueryWriteClient; -type BigQueryWriteClient = v1.BigQueryWriteClient; -export {v1, BigQueryReadClient, BigQueryWriteClient}; -export default {v1, BigQueryReadClient, BigQueryWriteClient}; -import * as protos from '../protos/protos'; -export {protos} diff --git a/owl-bot-staging/v1/src/v1/big_query_read_client.ts b/owl-bot-staging/v1/src/v1/big_query_read_client.ts deleted file mode 100644 index 2f22944c..00000000 --- a/owl-bot-staging/v1/src/v1/big_query_read_client.ts +++ /dev/null @@ -1,816 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -/* global window */ -import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions, GoogleError} from 'google-gax'; - -import { PassThrough } from 'stream'; -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1/big_query_read_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './big_query_read_client_config.json'; - -const version = require('../../../package.json').version; - -/** - * BigQuery Read API. - * - * The Read API can be used to read data from BigQuery. - * @class - * @memberof v1 - */ -export class BigQueryReadClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - pathTemplates: {[name: string]: gax.PathTemplate}; - bigQueryReadStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of BigQueryReadClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean} [options.fallback] - Use HTTP fallback mode. - * In fallback mode, a special browser-compatible transport implementation is used - * instead of gRPC transport. In browser context (if the `window` object is defined) - * the fallback mode is enabled automatically; set `options.fallback` to `false` - * if you need to override this behavior. 
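All of the constructor options above are optional; in most server environments, Application Default Credentials make a bare constructor sufficient. A small sketch of common overrides (both values below are placeholders):

import {BigQueryReadClient} from '@google-cloud/bigquery-storage';

// Defaults: gRPC transport, ADC credentials, bigquerystorage.googleapis.com.
const defaultClient = new BigQueryReadClient();

// Explicit overrides, e.g. for a custom endpoint or an exported key file.
const customClient = new BigQueryReadClient({
  apiEndpoint: 'bigquerystorage.googleapis.com',
  keyFilename: '/path/to/service-account-key.json',
});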
- */ - constructor(opts?: ClientOptions) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof BigQueryReadClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gax.fallback : gax; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // This API contains "path templates"; forward-slash-separated - // identifiers to uniquely identify resources within the API. - // Create useful helper objects for these. - this.pathTemplates = { - projectPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}' - ), - readSessionPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/sessions/{session}' - ), - readStreamPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/sessions/{session}/streams/{stream}' - ), - tablePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/datasets/{dataset}/tables/{table}' - ), - writeStreamPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}' - ), - }; - - // Some of the methods on this service provide streaming responses. - // Provide descriptors for these. - this.descriptors.stream = { - readRows: new this._gaxModule.StreamDescriptor(gax.StreamType.SERVER_STREAMING, opts.fallback === 'rest') - }; - - // Put together the default options sent with requests. 
- this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.bigquery.storage.v1.BigQueryRead', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = gax.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.bigQueryReadStub) { - return this.bigQueryReadStub; - } - - // Put together the "service stub" for - // google.cloud.bigquery.storage.v1.BigQueryRead. - this.bigQueryReadStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.storage.v1.BigQueryRead') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.bigquery.storage.v1.BigQueryRead, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. - const bigQueryReadStubMethods = - ['createReadSession', 'readRows', 'splitReadStream']; - for (const methodName of bigQueryReadStubMethods) { - const callPromise = this.bigQueryReadStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - if (methodName in this.descriptors.stream) { - const stream = new PassThrough(); - setImmediate(() => { - stream.emit('error', new GoogleError('The client has already been closed.')); - }); - return stream; - } - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.stream[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.bigQueryReadStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'bigquerystorage.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'bigquerystorage.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. 
- */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/bigquery', - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise<string>; - getProjectId(callback: Callback<string, undefined, undefined>): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. - */ - getProjectId(callback?: Callback<string, undefined, undefined>): - Promise<string>|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- -/** - * Creates a new read session. A read session divides the contents of a - * BigQuery table into one or more streams, which can then be used to read - * data from the table. The read session also specifies properties of the - * data to be read, such as a list of columns or a push-down filter describing - * the rows to be returned. - * - * A particular row can be read by at most one stream. When the caller has - * reached the end of each stream in the session, then all the data in the - * table has been read. - * - * Data is assigned to each stream such that roughly the same number of - * rows can be read from each stream. Because the server-side unit for - * assigning data is collections of rows, the API does not guarantee that - * each stream will return the same number of rows. Additionally, the - * limits are enforced based on the number of pre-filtered rows, so some - * filters can lead to lopsided assignments. - * - * Read sessions automatically expire 6 hours after they are created and do - * not require manual clean-up by the caller. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. The request project that owns the session, in the form of - * `projects/{project_id}`. - * @param {google.cloud.bigquery.storage.v1.ReadSession} request.readSession - * Required. Session to be created. - * @param {number} request.maxStreamCount - * Max initial number of streams. If unset or zero, the server will - * provide a value of streams so as to produce reasonable throughput. Must be - * non-negative. The number of streams may be lower than the requested number, - * depending on the amount of parallelism that is reasonable for the table. Error - * will be returned if the max count is greater than the current system - * max limit of 1,000. - * - * Streams must be read starting from offset 0. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [ReadSession]{@link google.cloud.bigquery.storage.v1.ReadSession}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples.
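Since createReadSession takes a `projects/{project_id}` parent, the getProjectId helper above is a convenient way to build that path without hard-coding the project. For example:

import {BigQueryReadClient} from '@google-cloud/bigquery-storage';

async function detectParent(): Promise<string> {
  const client = new BigQueryReadClient();
  // Resolved from options, the GCLOUD_PROJECT variable, or ADC metadata.
  const projectId = await client.getProjectId();
  return `projects/${projectId}`;
}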
- * @example include:samples/generated/v1/big_query_read.create_read_session.js - * region_tag:bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async - */ - createReadSession( - request?: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.bigquery.storage.v1.IReadSession, - protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|undefined, {}|undefined - ]>; - createReadSession( - request: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.IReadSession, - protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|null|undefined, - {}|null|undefined>): void; - createReadSession( - request: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.IReadSession, - protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|null|undefined, - {}|null|undefined>): void; - createReadSession( - request?: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.bigquery.storage.v1.IReadSession, - protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.bigquery.storage.v1.IReadSession, - protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.bigquery.storage.v1.IReadSession, - protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'read_session.table': request.readSession!.table || '', - }); - this.initialize(); - return this.innerApiCalls.createReadSession(request, options, callback); - } -/** - * Splits a given `ReadStream` into two `ReadStream` objects. These - * `ReadStream` objects are referred to as the primary and the residual - * streams of the split. The original `ReadStream` can still be read from in - * the same manner as before. Both of the returned `ReadStream` objects can - * also be read from, and the rows returned by both child streams will be - * the same as the rows read from the original stream. - * - * Moreover, the two child streams will be allocated back-to-back in the - * original `ReadStream`. Concretely, it is guaranteed that for streams - * original, primary, and residual, that original[0-j] = primary[0-j] and - * original[j-n] = residual[0-m] once the streams have been read to - * completion. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. Name of the stream to split. - * @param {number} request.fraction - * A value in the range (0.0, 1.0) that specifies the fractional point at - * which the original stream should be split. 
The actual split point is - * evaluated on pre-filtered rows, so if a filter is provided, then there is - * no guarantee that the division of the rows between the new child streams - * will be proportional to this fractional value. Additionally, because the - * server-side unit for assigning data is collections of rows, this fraction - * will always map to a data storage boundary on the server side. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [SplitReadStreamResponse]{@link google.cloud.bigquery.storage.v1.SplitReadStreamResponse}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. - * @example include:samples/generated/v1/big_query_read.split_read_stream.js - * region_tag:bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async - */ - splitReadStream( - request?: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|undefined, {}|undefined - ]>; - splitReadStream( - request: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|null|undefined, - {}|null|undefined>): void; - splitReadStream( - request: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|null|undefined, - {}|null|undefined>): void; - splitReadStream( - request?: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'name': request.name || '', - }); - this.initialize(); - return this.innerApiCalls.splitReadStream(request, options, callback); - } - -/** - * Reads rows from the stream in the format prescribed by the ReadSession. 
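Given the fraction semantics described above, a hedged sketch of splitting a stream in half; the stream name is a placeholder, and the response fields follow SplitReadStreamResponse (a primary and a remainder stream):

import {BigQueryReadClient} from '@google-cloud/bigquery-storage';

async function splitAtHalf(name: string) {
  const client = new BigQueryReadClient();
  const [response] = await client.splitReadStream({name, fraction: 0.5});
  // The original stream remains readable; the two children cover roughly
  // the first and second halves of its remaining rows.
  return {primary: response.primaryStream, remainder: response.remainderStream};
}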
- * Each response contains one or more table rows, up to a maximum of 100 MiB - * per response; read requests which attempt to read individual rows larger - * than 100 MiB will fail. - * - * Each request also returns a set of stream statistics reflecting the current - * state of the stream. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.readStream - * Required. Stream to read rows from. - * @param {number} request.offset - * The offset requested must be less than the last row read from Read. - * Requesting a larger offset is undefined. If not specified, start reading - * from offset zero. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Stream} - * An object stream which emits [ReadRowsResponse]{@link google.cloud.bigquery.storage.v1.ReadRowsResponse} on 'data' event. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#server-streaming) - * for more details and examples. - * @example include:samples/generated/v1/big_query_read.read_rows.js - * region_tag:bigquerystorage_v1_generated_BigQueryRead_ReadRows_async - */ - readRows( - request?: protos.google.cloud.bigquery.storage.v1.IReadRowsRequest, - options?: CallOptions): - gax.CancellableStream{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'read_stream': request.readStream || '', - }); - this.initialize(); - return this.innerApiCalls.readRows(request, options); - } - - // -------------------- - // -- Path templates -- - // -------------------- - - /** - * Return a fully-qualified project resource name string. - * - * @param {string} project - * @returns {string} Resource name string. - */ - projectPath(project:string) { - return this.pathTemplates.projectPathTemplate.render({ - project: project, - }); - } - - /** - * Parse the project from Project resource. - * - * @param {string} projectName - * A fully-qualified path representing Project resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectName(projectName: string) { - return this.pathTemplates.projectPathTemplate.match(projectName).project; - } - - /** - * Return a fully-qualified readSession resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} session - * @returns {string} Resource name string. - */ - readSessionPath(project:string,location:string,session:string) { - return this.pathTemplates.readSessionPathTemplate.render({ - project: project, - location: location, - session: session, - }); - } - - /** - * Parse the project from ReadSession resource. - * - * @param {string} readSessionName - * A fully-qualified path representing ReadSession resource. - * @returns {string} A string representing the project. - */ - matchProjectFromReadSessionName(readSessionName: string) { - return this.pathTemplates.readSessionPathTemplate.match(readSessionName).project; - } - - /** - * Parse the location from ReadSession resource. - * - * @param {string} readSessionName - * A fully-qualified path representing ReadSession resource. - * @returns {string} A string representing the location. 
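Unlike the unary calls, readRows above returns an object stream immediately rather than a promise. A minimal consumption sketch (the stream name is a placeholder; whether avroRows or arrowRecordBatch is populated depends on the session's data format):

import {BigQueryReadClient} from '@google-cloud/bigquery-storage';

function consumeStream(readStream: string) {
  const client = new BigQueryReadClient();
  const rows = client.readRows({readStream, offset: 0});
  rows.on('data', (response) => {
    // response.rowCount plus response.avroRows or response.arrowRecordBatch.
    console.log('rows in this response:', response.rowCount);
  });
  rows.on('error', console.error);
  rows.on('end', () => void client.close());
}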
- */ - matchLocationFromReadSessionName(readSessionName: string) { - return this.pathTemplates.readSessionPathTemplate.match(readSessionName).location; - } - - /** - * Parse the session from ReadSession resource. - * - * @param {string} readSessionName - * A fully-qualified path representing ReadSession resource. - * @returns {string} A string representing the session. - */ - matchSessionFromReadSessionName(readSessionName: string) { - return this.pathTemplates.readSessionPathTemplate.match(readSessionName).session; - } - - /** - * Return a fully-qualified readStream resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} session - * @param {string} stream - * @returns {string} Resource name string. - */ - readStreamPath(project:string,location:string,session:string,stream:string) { - return this.pathTemplates.readStreamPathTemplate.render({ - project: project, - location: location, - session: session, - stream: stream, - }); - } - - /** - * Parse the project from ReadStream resource. - * - * @param {string} readStreamName - * A fully-qualified path representing ReadStream resource. - * @returns {string} A string representing the project. - */ - matchProjectFromReadStreamName(readStreamName: string) { - return this.pathTemplates.readStreamPathTemplate.match(readStreamName).project; - } - - /** - * Parse the location from ReadStream resource. - * - * @param {string} readStreamName - * A fully-qualified path representing ReadStream resource. - * @returns {string} A string representing the location. - */ - matchLocationFromReadStreamName(readStreamName: string) { - return this.pathTemplates.readStreamPathTemplate.match(readStreamName).location; - } - - /** - * Parse the session from ReadStream resource. - * - * @param {string} readStreamName - * A fully-qualified path representing ReadStream resource. - * @returns {string} A string representing the session. - */ - matchSessionFromReadStreamName(readStreamName: string) { - return this.pathTemplates.readStreamPathTemplate.match(readStreamName).session; - } - - /** - * Parse the stream from ReadStream resource. - * - * @param {string} readStreamName - * A fully-qualified path representing ReadStream resource. - * @returns {string} A string representing the stream. - */ - matchStreamFromReadStreamName(readStreamName: string) { - return this.pathTemplates.readStreamPathTemplate.match(readStreamName).stream; - } - - /** - * Return a fully-qualified table resource name string. - * - * @param {string} project - * @param {string} dataset - * @param {string} table - * @returns {string} Resource name string. - */ - tablePath(project:string,dataset:string,table:string) { - return this.pathTemplates.tablePathTemplate.render({ - project: project, - dataset: dataset, - table: table, - }); - } - - /** - * Parse the project from Table resource. - * - * @param {string} tableName - * A fully-qualified path representing Table resource. - * @returns {string} A string representing the project. - */ - matchProjectFromTableName(tableName: string) { - return this.pathTemplates.tablePathTemplate.match(tableName).project; - } - - /** - * Parse the dataset from Table resource. - * - * @param {string} tableName - * A fully-qualified path representing Table resource. - * @returns {string} A string representing the dataset. - */ - matchDatasetFromTableName(tableName: string) { - return this.pathTemplates.tablePathTemplate.match(tableName).dataset; - } - - /** - * Parse the table from Table resource. 
- * - * @param {string} tableName - * A fully-qualified path representing Table resource. - * @returns {string} A string representing the table. - */ - matchTableFromTableName(tableName: string) { - return this.pathTemplates.tablePathTemplate.match(tableName).table; - } - - /** - * Return a fully-qualified writeStream resource name string. - * - * @param {string} project - * @param {string} dataset - * @param {string} table - * @param {string} stream - * @returns {string} Resource name string. - */ - writeStreamPath(project:string,dataset:string,table:string,stream:string) { - return this.pathTemplates.writeStreamPathTemplate.render({ - project: project, - dataset: dataset, - table: table, - stream: stream, - }); - } - - /** - * Parse the project from WriteStream resource. - * - * @param {string} writeStreamName - * A fully-qualified path representing WriteStream resource. - * @returns {string} A string representing the project. - */ - matchProjectFromWriteStreamName(writeStreamName: string) { - return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).project; - } - - /** - * Parse the dataset from WriteStream resource. - * - * @param {string} writeStreamName - * A fully-qualified path representing WriteStream resource. - * @returns {string} A string representing the dataset. - */ - matchDatasetFromWriteStreamName(writeStreamName: string) { - return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).dataset; - } - - /** - * Parse the table from WriteStream resource. - * - * @param {string} writeStreamName - * A fully-qualified path representing WriteStream resource. - * @returns {string} A string representing the table. - */ - matchTableFromWriteStreamName(writeStreamName: string) { - return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).table; - } - - /** - * Parse the stream from WriteStream resource. - * - * @param {string} writeStreamName - * A fully-qualified path representing WriteStream resource. - * @returns {string} A string representing the stream. - */ - matchStreamFromWriteStreamName(writeStreamName: string) { - return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).stream; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
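The render/match helpers above are pure string utilities, so they can be exercised without any network calls; the identifiers below are placeholders:

import {BigQueryReadClient} from '@google-cloud/bigquery-storage';

const client = new BigQueryReadClient();
const name = client.writeStreamPath('my-project', 'my_dataset', 'my_table', 'my-stream');
// name === 'projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream'
console.log(client.matchTableFromWriteStreamName(name)); // 'my_table'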
- */ - close(): Promise<void> { - if (this.bigQueryReadStub && !this._terminated) { - return this.bigQueryReadStub.then(stub => { - this._terminated = true; - stub.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/v1/src/v1/big_query_read_client_config.json b/owl-bot-staging/v1/src/v1/big_query_read_client_config.json deleted file mode 100644 index 42b2735b..00000000 --- a/owl-bot-staging/v1/src/v1/big_query_read_client_config.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "interfaces": { - "google.cloud.bigquery.storage.v1.BigQueryRead": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ], - "unavailable": [ - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "CreateReadSession": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - }, - "ReadRows": { - "timeout_millis": 86400000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "SplitReadStream": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - } - } - } - } -} diff --git a/owl-bot-staging/v1/src/v1/big_query_read_proto_list.json b/owl-bot-staging/v1/src/v1/big_query_read_proto_list.json deleted file mode 100644 index f0274ac3..00000000 --- a/owl-bot-staging/v1/src/v1/big_query_read_proto_list.json +++ /dev/null @@ -1,8 +0,0 @@ -[ - "../../protos/google/cloud/bigquery/storage/v1/arrow.proto", - "../../protos/google/cloud/bigquery/storage/v1/avro.proto", - "../../protos/google/cloud/bigquery/storage/v1/protobuf.proto", - "../../protos/google/cloud/bigquery/storage/v1/storage.proto", - "../../protos/google/cloud/bigquery/storage/v1/stream.proto", - "../../protos/google/cloud/bigquery/storage/v1/table.proto" -] diff --git a/owl-bot-staging/v1/src/v1/big_query_write_client.ts b/owl-bot-staging/v1/src/v1/big_query_write_client.ts deleted file mode 100644 index 622e758e..00000000 --- a/owl-bot-staging/v1/src/v1/big_query_write_client.ts +++ /dev/null @@ -1,1028 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions, GoogleError} from 'google-gax'; - -import { PassThrough } from 'stream'; -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1/big_query_write_client_config.json`.
- * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './big_query_write_client_config.json'; - -const version = require('../../../package.json').version; - -/** - * BigQuery Write API. - * - * The Write API can be used to write data to BigQuery. - * - * For supplementary information about the Write API, see: - * https://cloud.google.com/bigquery/docs/write-api - * @class - * @memberof v1 - */ -export class BigQueryWriteClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - pathTemplates: {[name: string]: gax.PathTemplate}; - bigQueryWriteStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of BigQueryWriteClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean} [options.fallback] - Use HTTP fallback mode. - * In fallback mode, a special browser-compatible transport implementation is used - * instead of gRPC transport. In browser context (if the `window` object is defined) - * the fallback mode is enabled automatically; set `options.fallback` to `false` - * if you need to override this behavior. - */ - constructor(opts?: ClientOptions) { - // Ensure that options include all the required fields. 
- const staticMembers = this.constructor as typeof BigQueryWriteClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gax.fallback : gax; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // This API contains "path templates"; forward-slash-separated - // identifiers to uniquely identify resources within the API. - // Create useful helper objects for these. - this.pathTemplates = { - projectPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}' - ), - readSessionPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/sessions/{session}' - ), - readStreamPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/sessions/{session}/streams/{stream}' - ), - tablePathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/datasets/{dataset}/tables/{table}' - ), - writeStreamPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}' - ), - }; - - // Some of the methods on this service provide streaming responses. - // Provide descriptors for these. - this.descriptors.stream = { - appendRows: new this._gaxModule.StreamDescriptor(gax.StreamType.BIDI_STREAMING, opts.fallback === 'rest') - }; - - // Put together the default options sent with requests. 
- this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.bigquery.storage.v1.BigQueryWrite', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = gax.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.bigQueryWriteStub) { - return this.bigQueryWriteStub; - } - - // Put together the "service stub" for - // google.cloud.bigquery.storage.v1.BigQueryWrite. - this.bigQueryWriteStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.storage.v1.BigQueryWrite') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.bigquery.storage.v1.BigQueryWrite, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. - const bigQueryWriteStubMethods = - ['createWriteStream', 'appendRows', 'getWriteStream', 'finalizeWriteStream', 'batchCommitWriteStreams', 'flushRows']; - for (const methodName of bigQueryWriteStubMethods) { - const callPromise = this.bigQueryWriteStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - if (methodName in this.descriptors.stream) { - const stream = new PassThrough(); - setImmediate(() => { - stream.emit('error', new GoogleError('The client has already been closed.')); - }); - return stream; - } - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.stream[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.bigQueryWriteStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'bigquerystorage.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'bigquerystorage.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. 
-   * @returns {string[]} List of default scopes.
-   */
-  static get scopes() {
-    return [
-      'https://www.googleapis.com/auth/bigquery',
-      'https://www.googleapis.com/auth/bigquery.insertdata',
-      'https://www.googleapis.com/auth/cloud-platform'
-    ];
-  }
-
-  getProjectId(): Promise<string>;
-  getProjectId(callback: Callback<string, undefined, undefined>): void;
-  /**
-   * Return the project ID used by this class.
-   * @returns {Promise} A promise that resolves to a string containing the project ID.
-   */
-  getProjectId(callback?: Callback<string, undefined, undefined>):
-      Promise<string>|void {
-    if (callback) {
-      this.auth.getProjectId(callback);
-      return;
-    }
-    return this.auth.getProjectId();
-  }
-
-  // -------------------
-  // -- Service calls --
-  // -------------------
-/**
- * Creates a write stream to the given table.
- * Additionally, every table has a special stream named '_default'
- * to which data can be written. This stream doesn't need to be created using
- * CreateWriteStream. It is a stream that can be used simultaneously by any
- * number of clients. Data written to this stream is considered committed as
- * soon as an acknowledgement is received.
- *
- * @param {Object} request
- *   The request object that will be sent.
- * @param {string} request.parent
- *   Required. Reference to the table to which the stream belongs, in the format
- *   of `projects/{project}/datasets/{dataset}/tables/{table}`.
- * @param {google.cloud.bigquery.storage.v1.WriteStream} request.writeStream
- *   Required. Stream to be created.
- * @param {object} [options]
- *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
- * @returns {Promise} - The promise which resolves to an array.
- *   The first element of the array is an object representing [WriteStream]{@link google.cloud.bigquery.storage.v1.WriteStream}.
- *   Please see the
- *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
- *   for more details and examples.
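A hedged usage sketch for `createWriteStream` as documented above; the project, dataset, and table names are placeholders, and the tuple destructuring follows the documented array-style resolution:

```ts
import {BigQueryWriteClient} from '@google-cloud/bigquery-storage';

async function createPendingStream(): Promise<void> {
  const client = new BigQueryWriteClient();
  // Format documented above: projects/{project}/datasets/{dataset}/tables/{table}.
  const parent = client.tablePath('my-project', 'my_dataset', 'my_table');
  // The promise resolves to an array whose first element is the WriteStream.
  const [writeStream] = await client.createWriteStream({
    parent,
    writeStream: {type: 'PENDING'}, // stream type per the WriteStream.Type enum
  });
  console.log(`created ${writeStream.name}`);
}
```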
- * @example include:samples/generated/v1/big_query_write.create_write_stream.js - * region_tag:bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async - */ - createWriteStream( - request?: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|undefined, {}|undefined - ]>; - createWriteStream( - request: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|null|undefined, - {}|null|undefined>): void; - createWriteStream( - request: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|null|undefined, - {}|null|undefined>): void; - createWriteStream( - request?: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'parent': request.parent || '', - }); - this.initialize(); - return this.innerApiCalls.createWriteStream(request, options, callback); - } -/** - * Gets information about a write stream. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. Name of the stream to get, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [WriteStream]{@link google.cloud.bigquery.storage.v1.WriteStream}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
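Every unary method in this client repeats the `optionsOrCallback` normalization visible in the `createWriteStream` implementation above. A condensed sketch of that shared idiom (a simplification for illustration, not the generated code itself):

```ts
type UnaryCallback<R> = (err: Error | null, result?: R) => void;

// Normalize the (request, options?, callback?) overload set: when the second
// argument is a function and no third argument was given, it is the callback.
function normalizeArgs<R>(
  optionsOrCallback?: object | UnaryCallback<R>,
  callback?: UnaryCallback<R>
): {options: object; callback?: UnaryCallback<R>} {
  if (typeof optionsOrCallback === 'function' && callback === undefined) {
    return {options: {}, callback: optionsOrCallback};
  }
  return {options: (optionsOrCallback as object) ?? {}, callback};
}
```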
- * @example include:samples/generated/v1/big_query_write.get_write_stream.js - * region_tag:bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async - */ - getWriteStream( - request?: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|undefined, {}|undefined - ]>; - getWriteStream( - request: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|null|undefined, - {}|null|undefined>): void; - getWriteStream( - request: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|null|undefined, - {}|null|undefined>): void; - getWriteStream( - request?: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.bigquery.storage.v1.IWriteStream, - protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'name': request.name || '', - }); - this.initialize(); - return this.innerApiCalls.getWriteStream(request, options, callback); - } -/** - * Finalize a write stream so that no new data can be appended to the - * stream. Finalize is not supported on the '_default' stream. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.name - * Required. Name of the stream to finalize, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [FinalizeWriteStreamResponse]{@link google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
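Finalization only makes sense as part of the `PENDING`-stream lifecycle: append, finalize, then commit via `batchCommitWriteStreams` (documented below). An end-to-end sketch under the same placeholder-name caveats as above; the `rowCount` and `commitTime` fields come from the response messages in storage.proto:

```ts
import {BigQueryWriteClient} from '@google-cloud/bigquery-storage';

async function finalizeAndCommit(parent: string, streamNames: string[]): Promise<void> {
  const client = new BigQueryWriteClient();
  for (const name of streamNames) {
    // After this resolves, no new data can be appended to the stream.
    const [finalized] = await client.finalizeWriteStream({name});
    console.log(`${name} finalized with ${finalized.rowCount} rows`);
  }
  // Streams must be finalized before commit and cannot be committed twice;
  // once committed, their data becomes available for read operations.
  const [commit] = await client.batchCommitWriteStreams({
    parent,
    writeStreams: streamNames,
  });
  console.log('commit time:', commit.commitTime);
}
```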
- * @example include:samples/generated/v1/big_query_write.finalize_write_stream.js - * region_tag:bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async - */ - finalizeWriteStream( - request?: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|undefined, {}|undefined - ]>; - finalizeWriteStream( - request: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|null|undefined, - {}|null|undefined>): void; - finalizeWriteStream( - request: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|null|undefined, - {}|null|undefined>): void; - finalizeWriteStream( - request?: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, - protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'name': request.name || '', - }); - this.initialize(); - return this.innerApiCalls.finalizeWriteStream(request, options, callback); - } -/** - * Atomically commits a group of `PENDING` streams that belong to the same - * `parent` table. - * - * Streams must be finalized before commit and cannot be committed multiple - * times. Once a stream is committed, data in the stream becomes available - * for read operations. - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.parent - * Required. Parent table that all the streams should belong to, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}`. - * @param {string[]} request.writeStreams - * Required. The group of streams that will be committed atomically. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [BatchCommitWriteStreamsResponse]{@link google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse}. 
- *   Please see the
- *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
- *   for more details and examples.
- * @example include:samples/generated/v1/big_query_write.batch_commit_write_streams.js
- * region_tag:bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async
- */
-  batchCommitWriteStreams(
-      request?: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest,
-      options?: CallOptions):
-      Promise<[
-        protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
-        protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|undefined, {}|undefined
-      ]>;
-  batchCommitWriteStreams(
-      request: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest,
-      options: CallOptions,
-      callback: Callback<
-          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
-          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|null|undefined,
-          {}|null|undefined>): void;
-  batchCommitWriteStreams(
-      request: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest,
-      callback: Callback<
-          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
-          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|null|undefined,
-          {}|null|undefined>): void;
-  batchCommitWriteStreams(
-      request?: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest,
-      optionsOrCallback?: CallOptions|Callback<
-          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
-          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|null|undefined,
-          {}|null|undefined>,
-      callback?: Callback<
-          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
-          protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|null|undefined,
-          {}|null|undefined>):
-      Promise<[
-        protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse,
-        protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest|undefined, {}|undefined
-      ]>|void {
-    request = request || {};
-    let options: CallOptions;
-    if (typeof optionsOrCallback === 'function' && callback === undefined) {
-      callback = optionsOrCallback;
-      options = {};
-    }
-    else {
-      options = optionsOrCallback as CallOptions;
-    }
-    options = options || {};
-    options.otherArgs = options.otherArgs || {};
-    options.otherArgs.headers = options.otherArgs.headers || {};
-    options.otherArgs.headers[
-      'x-goog-request-params'
-    ] = gax.routingHeader.fromParams({
-      'parent': request.parent || '',
-    });
-    this.initialize();
-    return this.innerApiCalls.batchCommitWriteStreams(request, options, callback);
-  }
-/**
- * Flushes rows to a BUFFERED stream.
- *
- * If users are appending rows to a BUFFERED stream, a flush operation is
- * required in order for the rows to become available for reading. A
- * flush operation flushes up to any previously flushed offset in a BUFFERED
- * stream, to the offset specified in the request.
- *
- * Flush is not supported on the _default stream, since it is not BUFFERED.
- *
- * @param {Object} request
- *   The request object that will be sent.
- * @param {string} request.writeStream
- *   Required. The stream that is the target of the flush operation.
- * @param {google.protobuf.Int64Value} request.offset
- *   Ending offset of the flush operation. Rows before this offset (including
- *   this offset) will be flushed.
- * @param {object} [options]
- *   Call options.
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [FlushRowsResponse]{@link google.cloud.bigquery.storage.v1.FlushRowsResponse}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. - * @example include:samples/generated/v1/big_query_write.flush_rows.js - * region_tag:bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async - */ - flushRows( - request?: protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse, - protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|undefined, {}|undefined - ]>; - flushRows( - request: protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse, - protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|null|undefined, - {}|null|undefined>): void; - flushRows( - request: protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest, - callback: Callback< - protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse, - protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|null|undefined, - {}|null|undefined>): void; - flushRows( - request?: protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse, - protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse, - protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse, - protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'write_stream': request.writeStream || '', - }); - this.initialize(); - return this.innerApiCalls.flushRows(request, options, callback); - } - -/** - * Appends data to the given stream. - * - * If `offset` is specified, the `offset` is checked against the end of - * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an - * attempt is made to append to an offset beyond the current end of the stream - * or `ALREADY_EXISTS` if user provides an `offset` that has already been - * written to. User can retry with adjusted offset within the same RPC - * connection. If `offset` is not specified, append happens at the end of the - * stream. - * - * The response contains an optional offset at which the append - * happened. No offset information will be returned for appends to a - * default stream. - * - * Responses are received in the same order in which requests are sent. 
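A hedged sketch of the `flushRows` call implemented above, for a `BUFFERED` stream; the stream name and offset value are placeholders, and the wrapper shape follows the `google.protobuf.Int64Value` parameter documented above:

```ts
import {BigQueryWriteClient} from '@google-cloud/bigquery-storage';

async function flushThroughOffset(writeStream: string, upToOffset: number): Promise<void> {
  const client = new BigQueryWriteClient();
  const [response] = await client.flushRows({
    writeStream,
    offset: {value: upToOffset}, // google.protobuf.Int64Value wrapper
  });
  // Rows up to and including this offset are now readable.
  console.log('flushed through offset', response.offset);
}
```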
- * There will be one response for each successfully inserted request. Responses
- * may optionally embed error information if the originating AppendRequest was
- * not successfully processed.
- *
- * The specifics of when successfully appended data is made visible to the
- * table are governed by the type of stream:
- *
- * * For COMMITTED streams (which includes the default stream), data is
- * visible immediately upon successful append.
- *
- * * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
- * rpc which advances a cursor to a newer offset in the stream.
- *
- * * For PENDING streams, data is not made visible until the stream itself is
- * finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
- * committed via the `BatchCommitWriteStreams` rpc.
- *
- * Note: For users coding against the gRPC API directly, it may be
- * necessary to supply the x-goog-request-params system parameter
- * with `write_stream=<full_write_stream_name>`.
- *
- * More information about system parameters:
- * https://cloud.google.com/apis/docs/system-parameters
- *
- * @param {object} [options]
- *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
- * @returns {Stream}
- *   An object stream which is both readable and writable. It accepts objects
- *   representing [AppendRowsRequest]{@link google.cloud.bigquery.storage.v1.AppendRowsRequest} for the write() method, and
- *   will emit objects representing [AppendRowsResponse]{@link google.cloud.bigquery.storage.v1.AppendRowsResponse} on the 'data' event asynchronously.
- *   Please see the
- *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#bi-directional-streaming)
- *   for more details and examples.
- * @example include:samples/generated/v1/big_query_write.append_rows.js
- * region_tag:bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async
- */
-  appendRows(
-      options?: CallOptions):
-    gax.CancellableStream {
-    this.initialize();
-    return this.innerApiCalls.appendRows(null, options);
-  }
-
-  // --------------------
-  // -- Path templates --
-  // --------------------
-
-  /**
-   * Return a fully-qualified project resource name string.
-   *
-   * @param {string} project
-   * @returns {string} Resource name string.
-   */
-  projectPath(project:string) {
-    return this.pathTemplates.projectPathTemplate.render({
-      project: project,
-    });
-  }
-
-  /**
-   * Parse the project from Project resource.
-   *
-   * @param {string} projectName
-   *   A fully-qualified path representing Project resource.
-   * @returns {string} A string representing the project.
-   */
-  matchProjectFromProjectName(projectName: string) {
-    return this.pathTemplates.projectPathTemplate.match(projectName).project;
-  }
-
-  /**
-   * Return a fully-qualified readSession resource name string.
-   *
-   * @param {string} project
-   * @param {string} location
-   * @param {string} session
-   * @returns {string} Resource name string.
-   */
-  readSessionPath(project:string,location:string,session:string) {
-    return this.pathTemplates.readSessionPathTemplate.render({
-      project: project,
-      location: location,
-      session: session,
-    });
-  }
-
-  /**
-   * Parse the project from ReadSession resource.
-   *
-   * @param {string} readSessionName
-   *   A fully-qualified path representing ReadSession resource.
-   * @returns {string} A string representing the project.
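Because `appendRows` above returns a raw bidirectional stream rather than a promise, wiring it up looks different from the unary calls. A sketch of the stream mechanics only; building a real `AppendRowsRequest` additionally needs a writer schema and serialized proto rows, which are elided here:

```ts
import {BigQueryWriteClient} from '@google-cloud/bigquery-storage';

function openAppendStream(writeStream: string) {
  const client = new BigQueryWriteClient();
  const stream = client.appendRows();
  stream.on('data', (response: any) => {
    // One response per successfully processed append request, in send order.
    console.log('append response offset:', response.appendResult?.offset);
  });
  stream.on('error', (err: Error) => console.error('append error:', err));
  // The target stream is identified in the first request on the connection.
  stream.write({writeStream});
  stream.end();
  return stream;
}
```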
- */ - matchProjectFromReadSessionName(readSessionName: string) { - return this.pathTemplates.readSessionPathTemplate.match(readSessionName).project; - } - - /** - * Parse the location from ReadSession resource. - * - * @param {string} readSessionName - * A fully-qualified path representing ReadSession resource. - * @returns {string} A string representing the location. - */ - matchLocationFromReadSessionName(readSessionName: string) { - return this.pathTemplates.readSessionPathTemplate.match(readSessionName).location; - } - - /** - * Parse the session from ReadSession resource. - * - * @param {string} readSessionName - * A fully-qualified path representing ReadSession resource. - * @returns {string} A string representing the session. - */ - matchSessionFromReadSessionName(readSessionName: string) { - return this.pathTemplates.readSessionPathTemplate.match(readSessionName).session; - } - - /** - * Return a fully-qualified readStream resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} session - * @param {string} stream - * @returns {string} Resource name string. - */ - readStreamPath(project:string,location:string,session:string,stream:string) { - return this.pathTemplates.readStreamPathTemplate.render({ - project: project, - location: location, - session: session, - stream: stream, - }); - } - - /** - * Parse the project from ReadStream resource. - * - * @param {string} readStreamName - * A fully-qualified path representing ReadStream resource. - * @returns {string} A string representing the project. - */ - matchProjectFromReadStreamName(readStreamName: string) { - return this.pathTemplates.readStreamPathTemplate.match(readStreamName).project; - } - - /** - * Parse the location from ReadStream resource. - * - * @param {string} readStreamName - * A fully-qualified path representing ReadStream resource. - * @returns {string} A string representing the location. - */ - matchLocationFromReadStreamName(readStreamName: string) { - return this.pathTemplates.readStreamPathTemplate.match(readStreamName).location; - } - - /** - * Parse the session from ReadStream resource. - * - * @param {string} readStreamName - * A fully-qualified path representing ReadStream resource. - * @returns {string} A string representing the session. - */ - matchSessionFromReadStreamName(readStreamName: string) { - return this.pathTemplates.readStreamPathTemplate.match(readStreamName).session; - } - - /** - * Parse the stream from ReadStream resource. - * - * @param {string} readStreamName - * A fully-qualified path representing ReadStream resource. - * @returns {string} A string representing the stream. - */ - matchStreamFromReadStreamName(readStreamName: string) { - return this.pathTemplates.readStreamPathTemplate.match(readStreamName).stream; - } - - /** - * Return a fully-qualified table resource name string. - * - * @param {string} project - * @param {string} dataset - * @param {string} table - * @returns {string} Resource name string. - */ - tablePath(project:string,dataset:string,table:string) { - return this.pathTemplates.tablePathTemplate.render({ - project: project, - dataset: dataset, - table: table, - }); - } - - /** - * Parse the project from Table resource. - * - * @param {string} tableName - * A fully-qualified path representing Table resource. - * @returns {string} A string representing the project. 
- */ - matchProjectFromTableName(tableName: string) { - return this.pathTemplates.tablePathTemplate.match(tableName).project; - } - - /** - * Parse the dataset from Table resource. - * - * @param {string} tableName - * A fully-qualified path representing Table resource. - * @returns {string} A string representing the dataset. - */ - matchDatasetFromTableName(tableName: string) { - return this.pathTemplates.tablePathTemplate.match(tableName).dataset; - } - - /** - * Parse the table from Table resource. - * - * @param {string} tableName - * A fully-qualified path representing Table resource. - * @returns {string} A string representing the table. - */ - matchTableFromTableName(tableName: string) { - return this.pathTemplates.tablePathTemplate.match(tableName).table; - } - - /** - * Return a fully-qualified writeStream resource name string. - * - * @param {string} project - * @param {string} dataset - * @param {string} table - * @param {string} stream - * @returns {string} Resource name string. - */ - writeStreamPath(project:string,dataset:string,table:string,stream:string) { - return this.pathTemplates.writeStreamPathTemplate.render({ - project: project, - dataset: dataset, - table: table, - stream: stream, - }); - } - - /** - * Parse the project from WriteStream resource. - * - * @param {string} writeStreamName - * A fully-qualified path representing WriteStream resource. - * @returns {string} A string representing the project. - */ - matchProjectFromWriteStreamName(writeStreamName: string) { - return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).project; - } - - /** - * Parse the dataset from WriteStream resource. - * - * @param {string} writeStreamName - * A fully-qualified path representing WriteStream resource. - * @returns {string} A string representing the dataset. - */ - matchDatasetFromWriteStreamName(writeStreamName: string) { - return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).dataset; - } - - /** - * Parse the table from WriteStream resource. - * - * @param {string} writeStreamName - * A fully-qualified path representing WriteStream resource. - * @returns {string} A string representing the table. - */ - matchTableFromWriteStreamName(writeStreamName: string) { - return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).table; - } - - /** - * Parse the stream from WriteStream resource. - * - * @param {string} writeStreamName - * A fully-qualified path representing WriteStream resource. - * @returns {string} A string representing the stream. - */ - matchStreamFromWriteStreamName(writeStreamName: string) { - return this.pathTemplates.writeStreamPathTemplate.match(writeStreamName).stream; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */
-  close(): Promise<void> {
-    if (this.bigQueryWriteStub && !this._terminated) {
-      return this.bigQueryWriteStub.then(stub => {
-        this._terminated = true;
-        stub.close();
-      });
-    }
-    return Promise.resolve();
-  }
-}
diff --git a/owl-bot-staging/v1/src/v1/big_query_write_client_config.json b/owl-bot-staging/v1/src/v1/big_query_write_client_config.json
deleted file mode 100644
index 67eb3165..00000000
--- a/owl-bot-staging/v1/src/v1/big_query_write_client_config.json
+++ /dev/null
@@ -1,59 +0,0 @@
-{
-  "interfaces": {
-    "google.cloud.bigquery.storage.v1.BigQueryWrite": {
-      "retry_codes": {
-        "non_idempotent": [],
-        "idempotent": [
-          "DEADLINE_EXCEEDED",
-          "UNAVAILABLE"
-        ],
-        "unavailable": [
-          "UNAVAILABLE"
-        ]
-      },
-      "retry_params": {
-        "default": {
-          "initial_retry_delay_millis": 100,
-          "retry_delay_multiplier": 1.3,
-          "max_retry_delay_millis": 60000,
-          "initial_rpc_timeout_millis": 60000,
-          "rpc_timeout_multiplier": 1,
-          "max_rpc_timeout_millis": 60000,
-          "total_timeout_millis": 600000
-        }
-      },
-      "methods": {
-        "CreateWriteStream": {
-          "timeout_millis": 600000,
-          "retry_codes_name": "idempotent",
-          "retry_params_name": "default"
-        },
-        "AppendRows": {
-          "timeout_millis": 86400000,
-          "retry_codes_name": "unavailable",
-          "retry_params_name": "default"
-        },
-        "GetWriteStream": {
-          "timeout_millis": 600000,
-          "retry_codes_name": "idempotent",
-          "retry_params_name": "default"
-        },
-        "FinalizeWriteStream": {
-          "timeout_millis": 600000,
-          "retry_codes_name": "idempotent",
-          "retry_params_name": "default"
-        },
-        "BatchCommitWriteStreams": {
-          "timeout_millis": 600000,
-          "retry_codes_name": "idempotent",
-          "retry_params_name": "default"
-        },
-        "FlushRows": {
-          "timeout_millis": 600000,
-          "retry_codes_name": "idempotent",
-          "retry_params_name": "default"
-        }
-      }
-    }
-  }
-}
diff --git a/owl-bot-staging/v1/src/v1/big_query_write_proto_list.json b/owl-bot-staging/v1/src/v1/big_query_write_proto_list.json
deleted file mode 100644
index f0274ac3..00000000
--- a/owl-bot-staging/v1/src/v1/big_query_write_proto_list.json
+++ /dev/null
@@ -1,8 +0,0 @@
-[
-  "../../protos/google/cloud/bigquery/storage/v1/arrow.proto",
-  "../../protos/google/cloud/bigquery/storage/v1/avro.proto",
-  "../../protos/google/cloud/bigquery/storage/v1/protobuf.proto",
-  "../../protos/google/cloud/bigquery/storage/v1/storage.proto",
-  "../../protos/google/cloud/bigquery/storage/v1/stream.proto",
-  "../../protos/google/cloud/bigquery/storage/v1/table.proto"
-]
diff --git a/owl-bot-staging/v1/src/v1/gapic_metadata.json b/owl-bot-staging/v1/src/v1/gapic_metadata.json
deleted file mode 100644
index f52c2dae..00000000
--- a/owl-bot-staging/v1/src/v1/gapic_metadata.json
+++ /dev/null
@@ -1,117 +0,0 @@
-{
-  "schema": "1.0",
-  "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
-  "language": "typescript",
-  "protoPackage": "google.cloud.bigquery.storage.v1",
-  "libraryPackage": "@google-cloud/bigquery-storage",
-  "services": {
-    "BigQueryRead": {
-      "clients": {
-        "grpc": {
-          "libraryClient": "BigQueryReadClient",
-          "rpcs": {
-            "CreateReadSession": {
-              "methods": [
-                "createReadSession"
-              ]
-            },
-            "SplitReadStream": {
-              "methods": [
-                "splitReadStream"
-              ]
-            },
-            "ReadRows": {
-              "methods": [
-                "readRows"
-              ]
-            }
-          }
-        },
-        "grpc-fallback": {
-          "libraryClient": "BigQueryReadClient",
-          "rpcs": {
-            "CreateReadSession": {
-              "methods": [
-                "createReadSession"
-              ]
-            },
-            "SplitReadStream": {
-              "methods": [
-                "splitReadStream"
-              ]
-            }
-          }
-        }
-      }
-    },
-    "BigQueryWrite": {
-      "clients": {
-        "grpc": {
"libraryClient": "BigQueryWriteClient", - "rpcs": { - "CreateWriteStream": { - "methods": [ - "createWriteStream" - ] - }, - "GetWriteStream": { - "methods": [ - "getWriteStream" - ] - }, - "FinalizeWriteStream": { - "methods": [ - "finalizeWriteStream" - ] - }, - "BatchCommitWriteStreams": { - "methods": [ - "batchCommitWriteStreams" - ] - }, - "FlushRows": { - "methods": [ - "flushRows" - ] - }, - "AppendRows": { - "methods": [ - "appendRows" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "BigQueryWriteClient", - "rpcs": { - "CreateWriteStream": { - "methods": [ - "createWriteStream" - ] - }, - "GetWriteStream": { - "methods": [ - "getWriteStream" - ] - }, - "FinalizeWriteStream": { - "methods": [ - "finalizeWriteStream" - ] - }, - "BatchCommitWriteStreams": { - "methods": [ - "batchCommitWriteStreams" - ] - }, - "FlushRows": { - "methods": [ - "flushRows" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1/src/v1/index.ts b/owl-bot-staging/v1/src/v1/index.ts deleted file mode 100644 index f3bacd94..00000000 --- a/owl-bot-staging/v1/src/v1/index.ts +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -export {BigQueryReadClient} from './big_query_read_client'; -export {BigQueryWriteClient} from './big_query_write_client'; diff --git a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js deleted file mode 100644 index ba02bb67..00000000 --- a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - -/* eslint-disable node/no-missing-require, no-unused-vars */ -const storage = require('@google-cloud/bigquery-storage'); - -function main() { - const bigQueryReadClient = new storage.BigQueryReadClient(); - const bigQueryWriteClient = new storage.BigQueryWriteClient(); -} - -main(); diff --git a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts deleted file mode 100644 index a0a1ed6d..00000000 --- a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {BigQueryReadClient, BigQueryWriteClient} from '@google-cloud/bigquery-storage'; - -// check that the client class type name can be used -function doStuffWithBigQueryReadClient(client: BigQueryReadClient) { - client.close(); -} -function doStuffWithBigQueryWriteClient(client: BigQueryWriteClient) { - client.close(); -} - -function main() { - // check that the client instance can be created - const bigQueryReadClient = new BigQueryReadClient(); - doStuffWithBigQueryReadClient(bigQueryReadClient); - // check that the client instance can be created - const bigQueryWriteClient = new BigQueryWriteClient(); - doStuffWithBigQueryWriteClient(bigQueryWriteClient); -} - -main(); diff --git a/owl-bot-staging/v1/system-test/install.ts b/owl-bot-staging/v1/system-test/install.ts deleted file mode 100644 index 8ec45222..00000000 --- a/owl-bot-staging/v1/system-test/install.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
**
-
-import { packNTest } from 'pack-n-play';
-import { readFileSync } from 'fs';
-import { describe, it } from 'mocha';
-
-describe('📦 pack-n-play test', () => {
-
-  it('TypeScript code', async function() {
-    this.timeout(300000);
-    const options = {
-      packageDir: process.cwd(),
-      sample: {
-        description: 'TypeScript user can use the type definitions',
-        ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString()
-      }
-    };
-    await packNTest(options);
-  });
-
-  it('JavaScript code', async function() {
-    this.timeout(300000);
-    const options = {
-      packageDir: process.cwd(),
-      sample: {
-        description: 'JavaScript user can use the library',
-        ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString()
-      }
-    };
-    await packNTest(options);
-  });
-
-});
diff --git a/owl-bot-staging/v1/test/gapic_big_query_read_v1.ts b/owl-bot-staging/v1/test/gapic_big_query_read_v1.ts
deleted file mode 100644
index 2d7324d7..00000000
--- a/owl-bot-staging/v1/test/gapic_big_query_read_v1.ts
+++ /dev/null
@@ -1,669 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// ** This file is automatically generated by gapic-generator-typescript. **
-// ** https://github.com/googleapis/gapic-generator-typescript **
-// ** All changes to this file may be overwritten. **
-
-import * as protos from '../protos/protos';
-import * as assert from 'assert';
-import * as sinon from 'sinon';
-import {SinonStub} from 'sinon';
-import { describe, it } from 'mocha';
-import * as bigqueryreadModule from '../src';
-
-import {PassThrough} from 'stream';
-
-import {protobuf} from 'google-gax';
-
-function generateSampleMessage<T extends object>(instance: T) {
-  const filledObject = (instance.constructor as typeof protobuf.Message)
-    .toObject(instance as protobuf.Message, {defaults: true});
-  return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T;
-}
-
-function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) {
-  return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]);
-}
-
-function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) {
-  return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response);
-}
-
-function stubServerStreamingCall<ResponseType>(response?: ResponseType, error?: Error) {
-  const transformStub = error ?
sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); - const mockStream = new PassThrough({ - objectMode: true, - transform: transformStub, - }); - // write something to the stream to trigger transformStub and send the response back to the client - setImmediate(() => { mockStream.write({}); }); - setImmediate(() => { mockStream.end(); }); - return sinon.stub().returns(mockStream); -} - -describe('v1.BigQueryReadClient', () => { - it('has servicePath', () => { - const servicePath = bigqueryreadModule.v1.BigQueryReadClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = bigqueryreadModule.v1.BigQueryReadClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = bigqueryreadModule.v1.BigQueryReadClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.bigQueryReadStub, undefined); - await client.initialize(); - assert(client.bigQueryReadStub); - }); - - it('has close method for the initialized client', done => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.bigQueryReadStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.bigQueryReadStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - - describe('createReadSession', () => { - it('invokes createReadSession without error', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 
'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateReadSessionRequest()); - request.readSession = {}; - request.readSession.table = ''; - const expectedHeaderRequestParams = "read_session.table="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadSession()); - client.innerApiCalls.createReadSession = stubSimpleCall(expectedResponse); - const [response] = await client.createReadSession(request); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.createReadSession as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes createReadSession without error using callback', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateReadSessionRequest()); - request.readSession = {}; - request.readSession.table = ''; - const expectedHeaderRequestParams = "read_session.table="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadSession()); - client.innerApiCalls.createReadSession = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.createReadSession( - request, - (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IReadSession|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.createReadSession as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes createReadSession with error', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateReadSessionRequest()); - request.readSession = {}; - request.readSession.table = ''; - const expectedHeaderRequestParams = "read_session.table="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedError = new Error('expected'); - client.innerApiCalls.createReadSession = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.createReadSession(request), expectedError); - assert((client.innerApiCalls.createReadSession as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes createReadSession with closed client', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateReadSessionRequest()); - request.readSession = {}; - request.readSession.table = ''; - const 
expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.createReadSession(request), expectedError); - }); - }); - - describe('splitReadStream', () => { - it('invokes splitReadStream without error', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamRequest()); - request.name = ''; - const expectedHeaderRequestParams = "name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamResponse()); - client.innerApiCalls.splitReadStream = stubSimpleCall(expectedResponse); - const [response] = await client.splitReadStream(request); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.splitReadStream as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes splitReadStream without error using callback', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamRequest()); - request.name = ''; - const expectedHeaderRequestParams = "name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamResponse()); - client.innerApiCalls.splitReadStream = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.splitReadStream( - request, - (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.splitReadStream as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes splitReadStream with error', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamRequest()); - request.name = ''; - const expectedHeaderRequestParams = "name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedError = new Error('expected'); - client.innerApiCalls.splitReadStream = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.splitReadStream(request), expectedError); - assert((client.innerApiCalls.splitReadStream as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes splitReadStream with closed client', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 
'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.SplitReadStreamRequest()); - request.name = ''; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.splitReadStream(request), expectedError); - }); - }); - - describe('readRows', () => { - it('invokes readRows without error', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadRowsRequest()); - request.readStream = ''; - const expectedHeaderRequestParams = "read_stream="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadRowsResponse()); - client.innerApiCalls.readRows = stubServerStreamingCall(expectedResponse); - const stream = client.readRows(request); - const promise = new Promise((resolve, reject) => { - stream.on('data', (response: protos.google.cloud.bigquery.storage.v1.ReadRowsResponse) => { - resolve(response); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.readRows as SinonStub) - .getCall(0).calledWith(request, expectedOptions)); - }); - - it('invokes readRows with error', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadRowsRequest()); - request.readStream = ''; - const expectedHeaderRequestParams = "read_stream="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedError = new Error('expected'); - client.innerApiCalls.readRows = stubServerStreamingCall(undefined, expectedError); - const stream = client.readRows(request); - const promise = new Promise((resolve, reject) => { - stream.on('data', (response: protos.google.cloud.bigquery.storage.v1.ReadRowsResponse) => { - resolve(response); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - await assert.rejects(promise, expectedError); - assert((client.innerApiCalls.readRows as SinonStub) - .getCall(0).calledWith(request, expectedOptions)); - }); - - it('invokes readRows with closed client', async () => { - const client = new bigqueryreadModule.v1.BigQueryReadClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.ReadRowsRequest()); - request.readStream = ''; - const expectedError = new Error('The client has already been closed.'); - client.close(); - const stream = client.readRows(request); - const promise = new Promise((resolve, reject) => { - stream.on('data', (response: protos.google.cloud.bigquery.storage.v1.ReadRowsResponse) => { - resolve(response); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - await assert.rejects(promise, expectedError); - }); - }); - - describe('Path templates', () => { 
-
-    describe('project', () => {
-      const fakePath = "/rendered/path/project";
-      const expectedParameters = {
-        project: "projectValue",
-      };
-      const client = new bigqueryreadModule.v1.BigQueryReadClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      client.pathTemplates.projectPathTemplate.render =
-        sinon.stub().returns(fakePath);
-      client.pathTemplates.projectPathTemplate.match =
-        sinon.stub().returns(expectedParameters);
-
-      it('projectPath', () => {
-        const result = client.projectPath("projectValue");
-        assert.strictEqual(result, fakePath);
-        assert((client.pathTemplates.projectPathTemplate.render as SinonStub)
-          .getCall(-1).calledWith(expectedParameters));
-      });
-
-      it('matchProjectFromProjectName', () => {
-        const result = client.matchProjectFromProjectName(fakePath);
-        assert.strictEqual(result, "projectValue");
-        assert((client.pathTemplates.projectPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-    });
-
-    describe('readSession', () => {
-      const fakePath = "/rendered/path/readSession";
-      const expectedParameters = {
-        project: "projectValue",
-        location: "locationValue",
-        session: "sessionValue",
-      };
-      const client = new bigqueryreadModule.v1.BigQueryReadClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      client.pathTemplates.readSessionPathTemplate.render =
-        sinon.stub().returns(fakePath);
-      client.pathTemplates.readSessionPathTemplate.match =
-        sinon.stub().returns(expectedParameters);
-
-      it('readSessionPath', () => {
-        const result = client.readSessionPath("projectValue", "locationValue", "sessionValue");
-        assert.strictEqual(result, fakePath);
-        assert((client.pathTemplates.readSessionPathTemplate.render as SinonStub)
-          .getCall(-1).calledWith(expectedParameters));
-      });
-
-      it('matchProjectFromReadSessionName', () => {
-        const result = client.matchProjectFromReadSessionName(fakePath);
-        assert.strictEqual(result, "projectValue");
-        assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchLocationFromReadSessionName', () => {
-        const result = client.matchLocationFromReadSessionName(fakePath);
-        assert.strictEqual(result, "locationValue");
-        assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchSessionFromReadSessionName', () => {
-        const result = client.matchSessionFromReadSessionName(fakePath);
-        assert.strictEqual(result, "sessionValue");
-        assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-    });
-
-    describe('readStream', () => {
-      const fakePath = "/rendered/path/readStream";
-      const expectedParameters = {
-        project: "projectValue",
-        location: "locationValue",
-        session: "sessionValue",
-        stream: "streamValue",
-      };
-      const client = new bigqueryreadModule.v1.BigQueryReadClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      client.pathTemplates.readStreamPathTemplate.render =
-        sinon.stub().returns(fakePath);
-      client.pathTemplates.readStreamPathTemplate.match =
-        sinon.stub().returns(expectedParameters);
-
-      it('readStreamPath', () => {
-        const result = client.readStreamPath("projectValue", "locationValue", "sessionValue", "streamValue");
-        assert.strictEqual(result, fakePath);
-        assert((client.pathTemplates.readStreamPathTemplate.render as SinonStub)
-          .getCall(-1).calledWith(expectedParameters));
-      });
-
-      it('matchProjectFromReadStreamName', () => {
-        const result = client.matchProjectFromReadStreamName(fakePath);
-        assert.strictEqual(result, "projectValue");
-        assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchLocationFromReadStreamName', () => {
-        const result = client.matchLocationFromReadStreamName(fakePath);
-        assert.strictEqual(result, "locationValue");
-        assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchSessionFromReadStreamName', () => {
-        const result = client.matchSessionFromReadStreamName(fakePath);
-        assert.strictEqual(result, "sessionValue");
-        assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchStreamFromReadStreamName', () => {
-        const result = client.matchStreamFromReadStreamName(fakePath);
-        assert.strictEqual(result, "streamValue");
-        assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-    });
-
-    describe('table', () => {
-      const fakePath = "/rendered/path/table";
-      const expectedParameters = {
-        project: "projectValue",
-        dataset: "datasetValue",
-        table: "tableValue",
-      };
-      const client = new bigqueryreadModule.v1.BigQueryReadClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      client.pathTemplates.tablePathTemplate.render =
-        sinon.stub().returns(fakePath);
-      client.pathTemplates.tablePathTemplate.match =
-        sinon.stub().returns(expectedParameters);
-
-      it('tablePath', () => {
-        const result = client.tablePath("projectValue", "datasetValue", "tableValue");
-        assert.strictEqual(result, fakePath);
-        assert((client.pathTemplates.tablePathTemplate.render as SinonStub)
-          .getCall(-1).calledWith(expectedParameters));
-      });
-
-      it('matchProjectFromTableName', () => {
-        const result = client.matchProjectFromTableName(fakePath);
-        assert.strictEqual(result, "projectValue");
-        assert((client.pathTemplates.tablePathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchDatasetFromTableName', () => {
-        const result = client.matchDatasetFromTableName(fakePath);
-        assert.strictEqual(result, "datasetValue");
-        assert((client.pathTemplates.tablePathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchTableFromTableName', () => {
-        const result = client.matchTableFromTableName(fakePath);
-        assert.strictEqual(result, "tableValue");
-        assert((client.pathTemplates.tablePathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-    });
-
-    describe('writeStream', () => {
-      const fakePath = "/rendered/path/writeStream";
-      const expectedParameters = {
-        project: "projectValue",
-        dataset: "datasetValue",
-        table: "tableValue",
-        stream: "streamValue",
-      };
-      const client = new bigqueryreadModule.v1.BigQueryReadClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      client.pathTemplates.writeStreamPathTemplate.render =
-        sinon.stub().returns(fakePath);
-      client.pathTemplates.writeStreamPathTemplate.match =
-        sinon.stub().returns(expectedParameters);
-
-      it('writeStreamPath', () => {
-        const result = client.writeStreamPath("projectValue", "datasetValue", "tableValue", "streamValue");
-        assert.strictEqual(result, fakePath);
-        assert((client.pathTemplates.writeStreamPathTemplate.render as SinonStub)
-          .getCall(-1).calledWith(expectedParameters));
-      });
-
-      it('matchProjectFromWriteStreamName', () => {
-        const result = client.matchProjectFromWriteStreamName(fakePath);
-        assert.strictEqual(result, "projectValue");
-        assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchDatasetFromWriteStreamName', () => {
-        const result = client.matchDatasetFromWriteStreamName(fakePath);
-        assert.strictEqual(result, "datasetValue");
-        assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchTableFromWriteStreamName', () => {
-        const result = client.matchTableFromWriteStreamName(fakePath);
-        assert.strictEqual(result, "tableValue");
-        assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchStreamFromWriteStreamName', () => {
-        const result = client.matchStreamFromWriteStreamName(fakePath);
-        assert.strictEqual(result, "streamValue");
-        assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-    });
-  });
-});
diff --git a/owl-bot-staging/v1/test/gapic_big_query_write_v1.ts b/owl-bot-staging/v1/test/gapic_big_query_write_v1.ts
deleted file mode 100644
index a5a7d4ee..00000000
--- a/owl-bot-staging/v1/test/gapic_big_query_write_v1.ts
+++ /dev/null
@@ -1,921 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// ** This file is automatically generated by gapic-generator-typescript. **
-// ** https://github.com/googleapis/gapic-generator-typescript **
-// ** All changes to this file may be overwritten. **
-
-import * as protos from '../protos/protos';
-import * as assert from 'assert';
-import * as sinon from 'sinon';
-import {SinonStub} from 'sinon';
-import { describe, it } from 'mocha';
-import * as bigquerywriteModule from '../src';
-
-import {PassThrough} from 'stream';
-
-import {protobuf} from 'google-gax';
-
-function generateSampleMessage<T extends object>(instance: T) {
-  const filledObject = (instance.constructor as typeof protobuf.Message)
-    .toObject(instance as protobuf.Message, {defaults: true});
-  return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T;
-}
-
-function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) {
-  return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]);
-}
-
-function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) {
-  return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response);
-}
-
-function stubBidiStreamingCall<ResponseType>(response?: ResponseType, error?: Error) {
-  const transformStub = error ?
-    sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response);
-  const mockStream = new PassThrough({
-    objectMode: true,
-    transform: transformStub,
-  });
-  return sinon.stub().returns(mockStream);
-}
-
-describe('v1.BigQueryWriteClient', () => {
-  it('has servicePath', () => {
-    const servicePath = bigquerywriteModule.v1.BigQueryWriteClient.servicePath;
-    assert(servicePath);
-  });
-
-  it('has apiEndpoint', () => {
-    const apiEndpoint = bigquerywriteModule.v1.BigQueryWriteClient.apiEndpoint;
-    assert(apiEndpoint);
-  });
-
-  it('has port', () => {
-    const port = bigquerywriteModule.v1.BigQueryWriteClient.port;
-    assert(port);
-    assert(typeof port === 'number');
-  });
-
-  it('should create a client with no option', () => {
-    const client = new bigquerywriteModule.v1.BigQueryWriteClient();
-    assert(client);
-  });
-
-  it('should create a client with gRPC fallback', () => {
-    const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-      fallback: true,
-    });
-    assert(client);
-  });
-
-  it('has initialize method and supports deferred initialization', async () => {
-    const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-      credentials: {client_email: 'bogus', private_key: 'bogus'},
-      projectId: 'bogus',
-    });
-    assert.strictEqual(client.bigQueryWriteStub, undefined);
-    await client.initialize();
-    assert(client.bigQueryWriteStub);
-  });
-
-  it('has close method for the initialized client', done => {
-    const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-      credentials: {client_email: 'bogus', private_key: 'bogus'},
-      projectId: 'bogus',
-    });
-    client.initialize();
-    assert(client.bigQueryWriteStub);
-    client.close().then(() => {
-      done();
-    });
-  });
-
-  it('has close method for the non-initialized client', done => {
-    const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-      credentials: {client_email: 'bogus', private_key: 'bogus'},
-      projectId: 'bogus',
-    });
-    assert.strictEqual(client.bigQueryWriteStub, undefined);
-    client.close().then(() => {
-      done();
-    });
-  });
-
-  it('has getProjectId method', async () => {
-    const fakeProjectId = 'fake-project-id';
-    const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-      credentials: {client_email: 'bogus', private_key: 'bogus'},
-      projectId: 'bogus',
-    });
-    client.auth.getProjectId = sinon.stub().resolves(fakeProjectId);
-    const result = await client.getProjectId();
-    assert.strictEqual(result, fakeProjectId);
-    assert((client.auth.getProjectId as SinonStub).calledWithExactly());
-  });
-
-  it('has getProjectId method with callback', async () => {
-    const fakeProjectId = 'fake-project-id';
-    const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-      credentials: {client_email: 'bogus', private_key: 'bogus'},
-      projectId: 'bogus',
-    });
-    client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId);
-    const promise = new Promise((resolve, reject) => {
-      client.getProjectId((err?: Error|null, projectId?: string|null) => {
-        if (err) {
-          reject(err);
-        } else {
-          resolve(projectId);
-        }
-      });
-    });
-    const result = await promise;
-    assert.strictEqual(result, fakeProjectId);
-  });
-
-  describe('createWriteStream', () => {
-    it('invokes createWriteStream without error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest());
-      request.parent = '';
-      const expectedHeaderRequestParams = "parent=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.WriteStream());
-      client.innerApiCalls.createWriteStream = stubSimpleCall(expectedResponse);
-      const [response] = await client.createWriteStream(request);
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.createWriteStream as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions, undefined));
-    });
-
-    it('invokes createWriteStream without error using callback', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest());
-      request.parent = '';
-      const expectedHeaderRequestParams = "parent=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.WriteStream());
-      client.innerApiCalls.createWriteStream = stubSimpleCallWithCallback(expectedResponse);
-      const promise = new Promise((resolve, reject) => {
-        client.createWriteStream(
-          request,
-          (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IWriteStream|null) => {
-            if (err) {
-              reject(err);
-            } else {
-              resolve(result);
-            }
-          });
-      });
-      const response = await promise;
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.createWriteStream as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions /*, callback defined above */));
-    });
-
-    it('invokes createWriteStream with error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest());
-      request.parent = '';
-      const expectedHeaderRequestParams = "parent=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedError = new Error('expected');
-      client.innerApiCalls.createWriteStream = stubSimpleCall(undefined, expectedError);
-      await assert.rejects(client.createWriteStream(request), expectedError);
-      assert((client.innerApiCalls.createWriteStream as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions, undefined));
-    });
-
-    it('invokes createWriteStream with closed client', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest());
-      request.parent = '';
-      const expectedError = new Error('The client has already been closed.');
-      client.close();
-      await assert.rejects(client.createWriteStream(request), expectedError);
-    });
-  });
-
-  describe('getWriteStream', () => {
-    it('invokes getWriteStream without error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.GetWriteStreamRequest());
-      request.name = '';
-      const expectedHeaderRequestParams = "name=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.WriteStream());
-      client.innerApiCalls.getWriteStream = stubSimpleCall(expectedResponse);
-      const [response] = await client.getWriteStream(request);
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.getWriteStream as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions, undefined));
-    });
-
-    it('invokes getWriteStream without error using callback', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.GetWriteStreamRequest());
-      request.name = '';
-      const expectedHeaderRequestParams = "name=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.WriteStream());
-      client.innerApiCalls.getWriteStream = stubSimpleCallWithCallback(expectedResponse);
-      const promise = new Promise((resolve, reject) => {
-        client.getWriteStream(
-          request,
-          (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IWriteStream|null) => {
-            if (err) {
-              reject(err);
-            } else {
-              resolve(result);
-            }
-          });
-      });
-      const response = await promise;
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.getWriteStream as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions /*, callback defined above */));
-    });
-
-    it('invokes getWriteStream with error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.GetWriteStreamRequest());
-      request.name = '';
-      const expectedHeaderRequestParams = "name=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedError = new Error('expected');
-      client.innerApiCalls.getWriteStream = stubSimpleCall(undefined, expectedError);
-      await assert.rejects(client.getWriteStream(request), expectedError);
-      assert((client.innerApiCalls.getWriteStream as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions, undefined));
-    });
-
-    it('invokes getWriteStream with closed client', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.GetWriteStreamRequest());
-      request.name = '';
-      const expectedError = new Error('The client has already been closed.');
-      client.close();
-      await assert.rejects(client.getWriteStream(request), expectedError);
-    });
-  });
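
[Editor's aside] Every unary test above follows the same stub-and-assert pattern, so it is worth seeing once in isolation. A minimal, self-contained sketch of that pattern follows; the FakeClient interface is illustrative rather than the generated class, and only the sinon stubbing and assertion calls mirror the tests above.

import * as assert from 'assert';
import * as sinon from 'sinon';
import {describe, it} from 'mocha';

// Hypothetical stand-in for a generated client's inner call surface;
// the real tests stub client.innerApiCalls.<method> the same way.
interface FakeClient {
  innerApiCalls: {getWriteStream: sinon.SinonStub};
  getWriteStream(request: {name: string}): Promise<[unknown]>;
}

describe('unary stub pattern (sketch)', () => {
  it('resolves the stubbed response and records the call', async () => {
    const expectedResponse = {name: 'projects/p/datasets/d/tables/t/streams/s'};
    // stubSimpleCall in the generated tests does exactly this on success:
    const stub = sinon.stub().resolves([expectedResponse]);
    const client: FakeClient = {
      innerApiCalls: {getWriteStream: stub},
      // The generated client also attaches routing headers before forwarding;
      // this sketch forwards the request unchanged.
      getWriteStream(request) {
        return this.innerApiCalls.getWriteStream(request);
      },
    };
    const [response] = await client.getWriteStream({name: ''});
    assert.deepStrictEqual(response, expectedResponse);
    assert(stub.getCall(0).calledWith({name: ''}));
  });
});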
-
-  describe('finalizeWriteStream', () => {
-    it('invokes finalizeWriteStream without error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest());
-      request.name = '';
-      const expectedHeaderRequestParams = "name=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse());
-      client.innerApiCalls.finalizeWriteStream = stubSimpleCall(expectedResponse);
-      const [response] = await client.finalizeWriteStream(request);
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.finalizeWriteStream as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions, undefined));
-    });
-
-    it('invokes finalizeWriteStream without error using callback', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest());
-      request.name = '';
-      const expectedHeaderRequestParams = "name=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse());
-      client.innerApiCalls.finalizeWriteStream = stubSimpleCallWithCallback(expectedResponse);
-      const promise = new Promise((resolve, reject) => {
-        client.finalizeWriteStream(
-          request,
-          (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse|null) => {
-            if (err) {
-              reject(err);
-            } else {
-              resolve(result);
-            }
-          });
-      });
-      const response = await promise;
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.finalizeWriteStream as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions /*, callback defined above */));
-    });
-
-    it('invokes finalizeWriteStream with error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest());
-      request.name = '';
-      const expectedHeaderRequestParams = "name=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedError = new Error('expected');
-      client.innerApiCalls.finalizeWriteStream = stubSimpleCall(undefined, expectedError);
-      await assert.rejects(client.finalizeWriteStream(request), expectedError);
-      assert((client.innerApiCalls.finalizeWriteStream as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions, undefined));
-    });
-
-    it('invokes finalizeWriteStream with closed client', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest());
-      request.name = '';
-      const expectedError = new Error('The client has already been closed.');
-      client.close();
-      await assert.rejects(client.finalizeWriteStream(request), expectedError);
-    });
-  });
-
-  describe('batchCommitWriteStreams', () => {
-    it('invokes batchCommitWriteStreams without error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest());
-      request.parent = '';
-      const expectedHeaderRequestParams = "parent=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse());
-      client.innerApiCalls.batchCommitWriteStreams = stubSimpleCall(expectedResponse);
-      const [response] = await client.batchCommitWriteStreams(request);
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.batchCommitWriteStreams as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions, undefined));
-    });
-
-    it('invokes batchCommitWriteStreams without error using callback', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest());
-      request.parent = '';
-      const expectedHeaderRequestParams = "parent=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse());
-      client.innerApiCalls.batchCommitWriteStreams = stubSimpleCallWithCallback(expectedResponse);
-      const promise = new Promise((resolve, reject) => {
-        client.batchCommitWriteStreams(
-          request,
-          (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse|null) => {
-            if (err) {
-              reject(err);
-            } else {
-              resolve(result);
-            }
-          });
-      });
-      const response = await promise;
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.batchCommitWriteStreams as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions /*, callback defined above */));
-    });
-
-    it('invokes batchCommitWriteStreams with error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest());
-      request.parent = '';
-      const expectedHeaderRequestParams = "parent=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedError = new Error('expected');
-      client.innerApiCalls.batchCommitWriteStreams = stubSimpleCall(undefined, expectedError);
-      await assert.rejects(client.batchCommitWriteStreams(request), expectedError);
-      assert((client.innerApiCalls.batchCommitWriteStreams as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions, undefined));
-    });
-
-    it('invokes batchCommitWriteStreams with closed client', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest());
-      request.parent = '';
-      const expectedError = new Error('The client has already been closed.');
-      client.close();
-      await assert.rejects(client.batchCommitWriteStreams(request), expectedError);
-    });
-  });
-
-  describe('flushRows', () => {
-    it('invokes flushRows without error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsRequest());
-      request.writeStream = '';
-      const expectedHeaderRequestParams = "write_stream=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsResponse());
-      client.innerApiCalls.flushRows = stubSimpleCall(expectedResponse);
-      const [response] = await client.flushRows(request);
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.flushRows as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions, undefined));
-    });
-
-    it('invokes flushRows without error using callback', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsRequest());
-      request.writeStream = '';
-      const expectedHeaderRequestParams = "write_stream=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsResponse());
-      client.innerApiCalls.flushRows = stubSimpleCallWithCallback(expectedResponse);
-      const promise = new Promise((resolve, reject) => {
-        client.flushRows(
-          request,
-          (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse|null) => {
-            if (err) {
-              reject(err);
-            } else {
-              resolve(result);
-            }
-          });
-      });
-      const response = await promise;
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.flushRows as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions /*, callback defined above */));
-    });
-
-    it('invokes flushRows with error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsRequest());
-      request.writeStream = '';
-      const expectedHeaderRequestParams = "write_stream=";
-      const expectedOptions = {
-        otherArgs: {
-          headers: {
-            'x-goog-request-params': expectedHeaderRequestParams,
-          },
-        },
-      };
-      const expectedError = new Error('expected');
-      client.innerApiCalls.flushRows = stubSimpleCall(undefined, expectedError);
-      await assert.rejects(client.flushRows(request), expectedError);
-      assert((client.innerApiCalls.flushRows as SinonStub)
-        .getCall(0).calledWith(request, expectedOptions, undefined));
-    });
-
-    it('invokes flushRows with closed client', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.FlushRowsRequest());
-      request.writeStream = '';
-      const expectedError = new Error('The client has already been closed.');
-      client.close();
-      await assert.rejects(client.flushRows(request), expectedError);
    });
-  });
-
-  describe('appendRows', () => {
-    it('invokes appendRows without error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.AppendRowsRequest());
-      const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.AppendRowsResponse());
-      client.innerApiCalls.appendRows = stubBidiStreamingCall(expectedResponse);
-      const stream = client.appendRows();
-      const promise = new Promise((resolve, reject) => {
-        stream.on('data', (response: protos.google.cloud.bigquery.storage.v1.AppendRowsResponse) => {
-          resolve(response);
-        });
-        stream.on('error', (err: Error) => {
-          reject(err);
-        });
-        stream.write(request);
-        stream.end();
-      });
-      const response = await promise;
-      assert.deepStrictEqual(response, expectedResponse);
-      assert((client.innerApiCalls.appendRows as SinonStub)
-        .getCall(0).calledWith(null));
-      assert.deepStrictEqual(((stream as unknown as PassThrough)
-        ._transform as SinonStub).getCall(0).args[0], request);
-    });
-
-    it('invokes appendRows with error', async () => {
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1.AppendRowsRequest());
-      const expectedError = new Error('expected');
-      client.innerApiCalls.appendRows = stubBidiStreamingCall(undefined, expectedError);
-      const stream = client.appendRows();
-      const promise = new Promise((resolve, reject) => {
-        stream.on('data', (response: protos.google.cloud.bigquery.storage.v1.AppendRowsResponse) => {
-          resolve(response);
-        });
-        stream.on('error', (err: Error) => {
-          reject(err);
-        });
-        stream.write(request);
-        stream.end();
-      });
-      await assert.rejects(promise, expectedError);
-      assert((client.innerApiCalls.appendRows as SinonStub)
-        .getCall(0).calledWith(null));
-      assert.deepStrictEqual(((stream as unknown as PassThrough)
-        ._transform as SinonStub).getCall(0).args[0], request);
-    });
-  });
-
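[Editor's aside] The appendRows tests above rely on stubBidiStreamingCall, which fakes a gRPC bidi stream with an object-mode PassThrough whose transform callback is a sinon stub. A minimal sketch of just that mechanism, outside the generated harness; the request and response literals are placeholders, not real AppendRows messages.

import * as assert from 'assert';
import * as sinon from 'sinon';
import {describe, it} from 'mocha';
import {PassThrough} from 'stream';

describe('bidi stub pattern (sketch)', () => {
  it('echoes a canned response for any written request', async () => {
    const expectedResponse = {offset: 0}; // placeholder response object
    // As in stubBidiStreamingCall above: the transform callback ignores the
    // incoming chunk and emits the canned response downstream.
    const transformStub = sinon.stub().callsArgWith(2, null, expectedResponse);
    const stream = new PassThrough({objectMode: true, transform: transformStub});
    const promise = new Promise((resolve, reject) => {
      stream.on('data', resolve);
      stream.on('error', reject);
      stream.write({writeStream: ''}); // placeholder request object
      stream.end();
    });
    assert.deepStrictEqual(await promise, expectedResponse);
    // The stub also records the written request, which is how the tests
    // above assert on stream._transform.getCall(0).args[0].
    assert.deepStrictEqual(transformStub.getCall(0).args[0], {writeStream: ''});
  });
});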
-  describe('Path templates', () => {
-
-    describe('project', () => {
-      const fakePath = "/rendered/path/project";
-      const expectedParameters = {
-        project: "projectValue",
-      };
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      client.pathTemplates.projectPathTemplate.render =
-        sinon.stub().returns(fakePath);
-      client.pathTemplates.projectPathTemplate.match =
-        sinon.stub().returns(expectedParameters);
-
-      it('projectPath', () => {
-        const result = client.projectPath("projectValue");
-        assert.strictEqual(result, fakePath);
-        assert((client.pathTemplates.projectPathTemplate.render as SinonStub)
-          .getCall(-1).calledWith(expectedParameters));
-      });
-
-      it('matchProjectFromProjectName', () => {
-        const result = client.matchProjectFromProjectName(fakePath);
-        assert.strictEqual(result, "projectValue");
-        assert((client.pathTemplates.projectPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-    });
-
-    describe('readSession', () => {
-      const fakePath = "/rendered/path/readSession";
-      const expectedParameters = {
-        project: "projectValue",
-        location: "locationValue",
-        session: "sessionValue",
-      };
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      client.pathTemplates.readSessionPathTemplate.render =
-        sinon.stub().returns(fakePath);
-      client.pathTemplates.readSessionPathTemplate.match =
-        sinon.stub().returns(expectedParameters);
-
-      it('readSessionPath', () => {
-        const result = client.readSessionPath("projectValue", "locationValue", "sessionValue");
-        assert.strictEqual(result, fakePath);
-        assert((client.pathTemplates.readSessionPathTemplate.render as SinonStub)
-          .getCall(-1).calledWith(expectedParameters));
-      });
-
-      it('matchProjectFromReadSessionName', () => {
-        const result = client.matchProjectFromReadSessionName(fakePath);
-        assert.strictEqual(result, "projectValue");
-        assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchLocationFromReadSessionName', () => {
-        const result = client.matchLocationFromReadSessionName(fakePath);
-        assert.strictEqual(result, "locationValue");
-        assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchSessionFromReadSessionName', () => {
-        const result = client.matchSessionFromReadSessionName(fakePath);
-        assert.strictEqual(result, "sessionValue");
-        assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-    });
-
-    describe('readStream', () => {
-      const fakePath = "/rendered/path/readStream";
-      const expectedParameters = {
-        project: "projectValue",
-        location: "locationValue",
-        session: "sessionValue",
-        stream: "streamValue",
-      };
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      client.pathTemplates.readStreamPathTemplate.render =
-        sinon.stub().returns(fakePath);
-      client.pathTemplates.readStreamPathTemplate.match =
-        sinon.stub().returns(expectedParameters);
-
-      it('readStreamPath', () => {
-        const result = client.readStreamPath("projectValue", "locationValue", "sessionValue", "streamValue");
-        assert.strictEqual(result, fakePath);
-        assert((client.pathTemplates.readStreamPathTemplate.render as SinonStub)
-          .getCall(-1).calledWith(expectedParameters));
-      });
-
-      it('matchProjectFromReadStreamName', () => {
-        const result = client.matchProjectFromReadStreamName(fakePath);
-        assert.strictEqual(result, "projectValue");
-        assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchLocationFromReadStreamName', () => {
-        const result = client.matchLocationFromReadStreamName(fakePath);
-        assert.strictEqual(result, "locationValue");
-        assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchSessionFromReadStreamName', () => {
-        const result = client.matchSessionFromReadStreamName(fakePath);
-        assert.strictEqual(result, "sessionValue");
-        assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchStreamFromReadStreamName', () => {
-        const result = client.matchStreamFromReadStreamName(fakePath);
-        assert.strictEqual(result, "streamValue");
-        assert((client.pathTemplates.readStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-    });
-
-    describe('table', () => {
-      const fakePath = "/rendered/path/table";
-      const expectedParameters = {
-        project: "projectValue",
-        dataset: "datasetValue",
-        table: "tableValue",
-      };
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      client.pathTemplates.tablePathTemplate.render =
-        sinon.stub().returns(fakePath);
-      client.pathTemplates.tablePathTemplate.match =
-        sinon.stub().returns(expectedParameters);
-
-      it('tablePath', () => {
-        const result = client.tablePath("projectValue", "datasetValue", "tableValue");
-        assert.strictEqual(result, fakePath);
-        assert((client.pathTemplates.tablePathTemplate.render as SinonStub)
-          .getCall(-1).calledWith(expectedParameters));
-      });
-
-      it('matchProjectFromTableName', () => {
-        const result = client.matchProjectFromTableName(fakePath);
-        assert.strictEqual(result, "projectValue");
-        assert((client.pathTemplates.tablePathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchDatasetFromTableName', () => {
-        const result = client.matchDatasetFromTableName(fakePath);
-        assert.strictEqual(result, "datasetValue");
-        assert((client.pathTemplates.tablePathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchTableFromTableName', () => {
-        const result = client.matchTableFromTableName(fakePath);
-        assert.strictEqual(result, "tableValue");
-        assert((client.pathTemplates.tablePathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-    });
-
-    describe('writeStream', () => {
-      const fakePath = "/rendered/path/writeStream";
-      const expectedParameters = {
-        project: "projectValue",
-        dataset: "datasetValue",
-        table: "tableValue",
-        stream: "streamValue",
-      };
-      const client = new bigquerywriteModule.v1.BigQueryWriteClient({
-        credentials: {client_email: 'bogus', private_key: 'bogus'},
-        projectId: 'bogus',
-      });
-      client.initialize();
-      client.pathTemplates.writeStreamPathTemplate.render =
-        sinon.stub().returns(fakePath);
-      client.pathTemplates.writeStreamPathTemplate.match =
-        sinon.stub().returns(expectedParameters);
-
-      it('writeStreamPath', () => {
-        const result = client.writeStreamPath("projectValue", "datasetValue", "tableValue", "streamValue");
-        assert.strictEqual(result, fakePath);
-        assert((client.pathTemplates.writeStreamPathTemplate.render as SinonStub)
-          .getCall(-1).calledWith(expectedParameters));
-      });
-
-      it('matchProjectFromWriteStreamName', () => {
-        const result = client.matchProjectFromWriteStreamName(fakePath);
-        assert.strictEqual(result, "projectValue");
-        assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchDatasetFromWriteStreamName', () => {
-        const result = client.matchDatasetFromWriteStreamName(fakePath);
-        assert.strictEqual(result, "datasetValue");
-        assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchTableFromWriteStreamName', () => {
-        const result = client.matchTableFromWriteStreamName(fakePath);
-        assert.strictEqual(result, "tableValue");
-        assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-
-      it('matchStreamFromWriteStreamName', () => {
-        const result = client.matchStreamFromWriteStreamName(fakePath);
-        assert.strictEqual(result, "streamValue");
-        assert((client.pathTemplates.writeStreamPathTemplate.match as SinonStub)
-          .getCall(-1).calledWith(fakePath));
-      });
-    });
-  });
-});
diff --git a/owl-bot-staging/v1/tsconfig.json b/owl-bot-staging/v1/tsconfig.json
deleted file mode 100644
index c78f1c88..00000000
--- a/owl-bot-staging/v1/tsconfig.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "extends": "./node_modules/gts/tsconfig-google.json",
-  "compilerOptions": {
-    "rootDir": ".",
-    "outDir": "build",
-    "resolveJsonModule": true,
-    "lib": [
-      "es2018",
-      "dom"
-    ]
-  },
-  "include": [
-    "src/*.ts",
-    "src/**/*.ts",
-    "test/*.ts",
-    "test/**/*.ts",
-    "system-test/*.ts"
-  ]
-}
diff --git a/owl-bot-staging/v1/webpack.config.js b/owl-bot-staging/v1/webpack.config.js
deleted file mode 100644
index bad73cc0..00000000
--- a/owl-bot-staging/v1/webpack.config.js
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-const path = require('path');
-
-module.exports = {
-  entry: './src/index.ts',
-  output: {
-    library: 'BigQueryRead',
-    filename: './big-query-read.js',
-  },
-  node: {
-    child_process: 'empty',
-    fs: 'empty',
-    crypto: 'empty',
-  },
-  resolve: {
-    alias: {
-      '../../../package.json': path.resolve(__dirname, 'package.json'),
-    },
-    extensions: ['.js', '.json', '.ts'],
-  },
-  module: {
-    rules: [
-      {
-        test: /\.tsx?$/,
-        use: 'ts-loader',
-        exclude: /node_modules/
-      },
-      {
-        test: /node_modules[\\/]@grpc[\\/]grpc-js/,
-        use: 'null-loader'
-      },
-      {
-        test: /node_modules[\\/]grpc/,
-        use: 'null-loader'
-      },
-      {
-        test: /node_modules[\\/]retry-request/,
-        use: 'null-loader'
-      },
-      {
-        test: /node_modules[\\/]https?-proxy-agent/,
-        use: 'null-loader'
-      },
-      {
-        test: /node_modules[\\/]gtoken/,
-        use: 'null-loader'
-      },
-    ],
-  },
-  mode: 'production',
-};
diff --git a/owl-bot-staging/v1beta1/.eslintignore b/owl-bot-staging/v1beta1/.eslintignore
deleted file mode 100644
index cfc348ec..00000000
--- a/owl-bot-staging/v1beta1/.eslintignore
+++ /dev/null
@@ -1,7 +0,0 @@
-**/node_modules
-**/.coverage
-build/
-docs/
-protos/
-system-test/
-samples/generated/
diff --git a/owl-bot-staging/v1beta1/.eslintrc.json b/owl-bot-staging/v1beta1/.eslintrc.json
deleted file mode 100644
index 78215349..00000000
--- a/owl-bot-staging/v1beta1/.eslintrc.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "extends": "./node_modules/gts"
-}
diff --git a/owl-bot-staging/v1beta1/.gitignore b/owl-bot-staging/v1beta1/.gitignore
deleted file mode 100644
index 5d32b237..00000000
--- a/owl-bot-staging/v1beta1/.gitignore
+++ /dev/null
@@ -1,14 +0,0 @@
-**/*.log
-**/node_modules
-.coverage
-coverage
-.nyc_output
-docs/
-out/
-build/
-system-test/secrets.js
-system-test/*key.json
-*.lock
-.DS_Store
-package-lock.json
-__pycache__
diff --git a/owl-bot-staging/v1beta1/.jsdoc.js b/owl-bot-staging/v1beta1/.jsdoc.js
deleted file mode 100644
index 21870f2a..00000000
--- a/owl-bot-staging/v1beta1/.jsdoc.js
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// ** This file is automatically generated by gapic-generator-typescript. **
-// ** https://github.com/googleapis/gapic-generator-typescript **
-// ** All changes to this file may be overwritten. **
-
-'use strict';
-
-module.exports = {
-  opts: {
-    readme: './README.md',
-    package: './package.json',
-    template: './node_modules/jsdoc-fresh',
-    recurse: true,
-    verbose: true,
-    destination: './docs/'
-  },
-  plugins: [
-    'plugins/markdown',
-    'jsdoc-region-tag'
-  ],
-  source: {
-    excludePattern: '(^|\\/|\\\\)[._]',
-    include: [
-      'build/src',
-      'protos'
-    ],
-    includePattern: '\\.js$'
-  },
-  templates: {
-    copyright: 'Copyright 2022 Google LLC',
-    includeDate: false,
-    sourceFiles: false,
-    systemName: '@google-cloud/bigquery-storage',
-    theme: 'lumen',
-    default: {
-      outputSourceFiles: false
-    }
-  },
-  markdown: {
-    idInHeadings: true
-  }
-};
diff --git a/owl-bot-staging/v1beta1/.mocharc.js b/owl-bot-staging/v1beta1/.mocharc.js
deleted file mode 100644
index 481c522b..00000000
--- a/owl-bot-staging/v1beta1/.mocharc.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// ** This file is automatically generated by gapic-generator-typescript. **
-// ** https://github.com/googleapis/gapic-generator-typescript **
-// ** All changes to this file may be overwritten. **
-
-const config = {
-  "enable-source-maps": true,
-  "throw-deprecation": true,
-  "timeout": 10000
-}
-if (process.env.MOCHA_THROW_DEPRECATION === 'false') {
-  delete config['throw-deprecation'];
-}
-if (process.env.MOCHA_REPORTER) {
-  config.reporter = process.env.MOCHA_REPORTER;
-}
-if (process.env.MOCHA_REPORTER_OUTPUT) {
-  config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`;
-}
-module.exports = config
diff --git a/owl-bot-staging/v1beta1/.prettierrc.js b/owl-bot-staging/v1beta1/.prettierrc.js
deleted file mode 100644
index 494e1478..00000000
--- a/owl-bot-staging/v1beta1/.prettierrc.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// ** This file is automatically generated by gapic-generator-typescript. **
-// ** https://github.com/googleapis/gapic-generator-typescript **
-// ** All changes to this file may be overwritten. **
-
-
-module.exports = {
-  ...require('gts/.prettierrc.json')
-}
diff --git a/owl-bot-staging/v1beta1/README.md b/owl-bot-staging/v1beta1/README.md
deleted file mode 100644
index f5dcfbaf..00000000
--- a/owl-bot-staging/v1beta1/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Storage: Nodejs Client
diff --git a/owl-bot-staging/v1beta1/linkinator.config.json b/owl-bot-staging/v1beta1/linkinator.config.json
deleted file mode 100644
index befd23c8..00000000
--- a/owl-bot-staging/v1beta1/linkinator.config.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "recurse": true,
-  "skip": [
-    "https://codecov.io/gh/googleapis/",
-    "www.googleapis.com",
-    "img.shields.io",
-    "https://console.cloud.google.com/cloudshell",
-    "https://support.google.com"
-  ],
-  "silent": true,
-  "concurrency": 5,
-  "retry": true,
-  "retryErrors": true,
-  "retryErrorsCount": 5,
-  "retryErrorsJitter": 3000
-}
diff --git a/owl-bot-staging/v1beta1/package.json b/owl-bot-staging/v1beta1/package.json
deleted file mode 100644
index 763726e4..00000000
--- a/owl-bot-staging/v1beta1/package.json
+++ /dev/null
@@ -1,64 +0,0 @@
-{
-  "name": "@google-cloud/bigquery-storage",
-  "version": "0.1.0",
-  "description": "Storage client for Node.js",
-  "repository": "googleapis/nodejs-storage",
-  "license": "Apache-2.0",
-  "author": "Google LLC",
-  "main": "build/src/index.js",
-  "files": [
-    "build/src",
-    "build/protos"
-  ],
-  "keywords": [
-    "google apis client",
-    "google api client",
-    "google apis",
-    "google api",
-    "google",
-    "google cloud platform",
-    "google cloud",
-    "cloud",
-    "google storage",
-    "storage",
-    "big query storage"
-  ],
-  "scripts": {
-    "clean": "gts clean",
-    "compile": "tsc -p . && cp -r protos build/",
-    "compile-protos": "compileProtos src",
-    "docs": "jsdoc -c .jsdoc.js",
-    "predocs-test": "npm run docs",
-    "docs-test": "linkinator docs",
-    "fix": "gts fix",
-    "lint": "gts check",
-    "prepare": "npm run compile-protos && npm run compile",
-    "system-test": "c8 mocha build/system-test",
-    "test": "c8 mocha build/test"
-  },
-  "dependencies": {
-    "google-gax": "^2.29.4"
-  },
-  "devDependencies": {
-    "@types/mocha": "^9.1.0",
-    "@types/node": "^16.0.0",
-    "@types/sinon": "^10.0.8",
-    "c8": "^7.11.0",
-    "gts": "^3.1.0",
-    "jsdoc": "^3.6.7",
-    "jsdoc-fresh": "^1.1.1",
-    "jsdoc-region-tag": "^1.3.1",
-    "linkinator": "^3.0.0",
-    "mocha": "^9.1.4",
-    "null-loader": "^4.0.1",
-    "pack-n-play": "^1.0.0-2",
-    "sinon": "^13.0.0",
-    "ts-loader": "^9.2.6",
-    "typescript": "^4.5.5",
-    "webpack": "^5.67.0",
-    "webpack-cli": "^4.9.1"
-  },
-  "engines": {
-    "node": ">=v10.24.0"
-  }
-}
diff --git a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto
deleted file mode 100644
index f70c61c7..00000000
--- a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/arrow.proto
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1beta1; - -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; -option java_outer_classname = "ArrowProto"; -option java_package = "com.google.cloud.bigquery.storage.v1beta1"; - -// Arrow schema. -message ArrowSchema { - // IPC serialized Arrow schema. - bytes serialized_schema = 1; -} - -// Arrow RecordBatch. -message ArrowRecordBatch { - // IPC serialized Arrow RecordBatch. - bytes serialized_record_batch = 1; - - // The count of rows in the returning block. - int64 row_count = 2; -} diff --git a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/avro.proto b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/avro.proto deleted file mode 100644 index 7d034a28..00000000 --- a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/avro.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1beta1; - -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; -option java_outer_classname = "AvroProto"; -option java_package = "com.google.cloud.bigquery.storage.v1beta1"; - -// Avro schema. -message AvroSchema { - // Json serialized schema, as described at - // https://avro.apache.org/docs/1.8.1/spec.html - string schema = 1; -} - -// Avro rows. -message AvroRows { - // Binary serialized rows in a block. - bytes serialized_binary_rows = 1; - - // The count of rows in the returning block. - int64 row_count = 2; -} diff --git a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto deleted file mode 100644 index 1ff8d8b5..00000000 --- a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/read_options.proto +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1beta1; - -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; -option java_package = "com.google.cloud.bigquery.storage.v1beta1"; - -// Options dictating how we read a table. -message TableReadOptions { - // Optional. Names of the fields in the table that should be read. 
If empty, - // all fields will be read. If the specified field is a nested field, all the - // sub-fields in the field will be selected. The output field order is - // unrelated to the order of fields in selected_fields. - repeated string selected_fields = 1; - - // Optional. SQL text filtering statement, similar to a WHERE clause in - // a query. Aggregates are not supported. - // - // Examples: "int_field > 5" - // "date_field = CAST('2014-9-27' as DATE)" - // "nullable_field is not NULL" - // "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" - // "numeric_field BETWEEN 1.0 AND 5.0" - string row_restriction = 2; -} diff --git a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/storage.proto b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/storage.proto deleted file mode 100644 index 0d311418..00000000 --- a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/storage.proto +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1beta1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/cloud/bigquery/storage/v1beta1/arrow.proto"; -import "google/cloud/bigquery/storage/v1beta1/avro.proto"; -import "google/cloud/bigquery/storage/v1beta1/read_options.proto"; -import "google/cloud/bigquery/storage/v1beta1/table_reference.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; -option java_package = "com.google.cloud.bigquery.storage.v1beta1"; - -// BigQuery storage API. -// -// The BigQuery storage API can be used to read data stored in BigQuery. -service BigQueryStorage { - option (google.api.default_host) = "bigquerystorage.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/bigquery," - "https://www.googleapis.com/auth/cloud-platform"; - - // Creates a new read session. A read session divides the contents of a - // BigQuery table into one or more streams, which can then be used to read - // data from the table. The read session also specifies properties of the - // data to be read, such as a list of columns or a push-down filter describing - // the rows to be returned. - // - // A particular row can be read by at most one stream. When the caller has - // reached the end of each stream in the session, then all the data in the - // table has been read. - // - // Read sessions automatically expire 24 hours after they are created and do - // not require manual clean-up by the caller. 
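For orientation, the session-based read flow described above looks roughly like the following in application code. This is a minimal sketch against the v1beta1 client removed by this patch; the project, dataset, and table names are placeholders, and the async-iteration style over the row stream is an assumption (any Node Readable consumption works).

import {v1beta1} from '@google-cloud/bigquery-storage';

async function readWholeTable(): Promise<void> {
  const client = new v1beta1.BigQueryStorageClient();
  // Placeholder identifiers, not values from this patch.
  const [session] = await client.createReadSession({
    tableReference: {
      projectId: 'my-project',
      datasetId: 'my_dataset',
      tableId: 'my_table',
    },
    parent: 'projects/my-billing-project',
    requestedStreams: 2,
  });
  // A row is served by at most one stream, so draining every stream in the
  // session reads the full table before the 24-hour session expiry.
  for (const stream of session.streams ?? []) {
    const rowStream = client.readRows({readPosition: {stream, offset: 0}});
    for await (const response of rowStream) {
      console.log(`received ${response.rowCount} rows`);
    }
  }
}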
- rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) { - option (google.api.http) = { - post: "/v1beta1/{table_reference.project_id=projects/*}" - body: "*" - additional_bindings { - post: "/v1beta1/{table_reference.dataset_id=projects/*/datasets/*}" - body: "*" - } - }; - option (google.api.method_signature) = "table_reference,parent,requested_streams"; - } - - // Reads rows from the table in the format prescribed by the read session. - // Each response contains one or more table rows, up to a maximum of 10 MiB - // per response; read requests which attempt to read individual rows larger - // than this will fail. - // - // Each request also returns a set of stream statistics reflecting the - // estimated total number of rows in the read stream. This number is computed - // based on the total table size and the number of active streams in the read - // session, and may change as other streams continue to read data. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - option (google.api.http) = { - get: "/v1beta1/{read_position.stream.name=projects/*/streams/*}" - }; - option (google.api.method_signature) = "read_position"; - } - - // Creates additional streams for a ReadSession. This API can be used to - // dynamically adjust the parallelism of a batch processing task upwards by - // adding additional workers. - rpc BatchCreateReadSessionStreams(BatchCreateReadSessionStreamsRequest) returns (BatchCreateReadSessionStreamsResponse) { - option (google.api.http) = { - post: "/v1beta1/{session.name=projects/*/sessions/*}" - body: "*" - }; - option (google.api.method_signature) = "session,requested_streams"; - } - - // Triggers the graceful termination of a single stream in a ReadSession. This - // API can be used to dynamically adjust the parallelism of a batch processing - // task downwards without losing data. - // - // This API does not delete the stream -- it remains visible in the - // ReadSession, and any data processed by the stream is not released to other - // streams. However, no additional data will be assigned to the stream once - // this call completes. Callers must continue reading data on the stream until - // the end of the stream is reached so that data which has already been - // assigned to the stream will be processed. - // - // This method will return an error if there are no other live streams - // in the Session, or if SplitReadStream() has been called on the given - // Stream. - rpc FinalizeStream(FinalizeStreamRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v1beta1/{stream.name=projects/*/streams/*}" - body: "*" - }; - option (google.api.method_signature) = "stream"; - } - - // Splits a given read stream into two Streams. These streams are referred to - // as the primary and the residual of the split. The original stream can still - // be read from in the same manner as before. Both of the returned streams can - // also be read from, and the total rows returned by both child streams will be - // the same as the rows read from the original stream. - // - // Moreover, the two child streams will be allocated back to back in the - // original Stream. Concretely, it is guaranteed that for streams Original, - // Primary, and Residual, that Original[0-j] = Primary[0-j] and - // Original[j-n] = Residual[0-m] once the streams have been read to - // completion. - // - // This method is guaranteed to be idempotent.
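The primary/residual contract documented above can be exercised as below. This is a hedged sketch: `client` and `stream` are assumed to come from an existing session as in the earlier read sketch, and 0.5 is an arbitrary split fraction.

import {v1beta1} from '@google-cloud/bigquery-storage';

async function splitInHalf(
  client: v1beta1.BigQueryStorageClient,
  stream: {name?: string | null}
) {
  const [response] = await client.splitReadStream({
    originalStream: stream,
    fraction: 0.5, // requested split point, evaluated over pre-filtered rows
  });
  if (!response.primaryStream) {
    return null; // an empty primary means the stream can no longer be split
  }
  // Primary covers Original[0-j]; remainder covers Original[j-n].
  return {primary: response.primaryStream, residual: response.remainderStream};
}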
- rpc SplitReadStream(SplitReadStreamRequest) returns (SplitReadStreamResponse) { - option (google.api.http) = { - get: "/v1beta1/{original_stream.name=projects/*/streams/*}" - }; - option (google.api.method_signature) = "original_stream"; - } -} - -// Information about a single data stream within a read session. -message Stream { - option (google.api.resource) = { - type: "bigquerystorage.googleapis.com/Stream" - pattern: "projects/{project}/locations/{location}/streams/{stream}" - }; - - // Name of the stream, in the form - // `projects/{project_id}/locations/{location}/streams/{stream_id}`. - string name = 1; -} - -// Expresses a point within a given stream using an offset position. -message StreamPosition { - // Identifier for a given Stream. - Stream stream = 1; - - // Position in the stream. - int64 offset = 2; -} - -// Information returned from a `CreateReadSession` request. -message ReadSession { - option (google.api.resource) = { - type: "bigquerystorage.googleapis.com/ReadSession" - pattern: "projects/{project}/locations/{location}/sessions/{session}" - }; - - // Unique identifier for the session, in the form - // `projects/{project_id}/locations/{location}/sessions/{session_id}`. - string name = 1; - - // Time at which the session becomes invalid. After this time, subsequent - // requests to read this Session will return errors. - google.protobuf.Timestamp expire_time = 2; - - // The schema for the read. If read_options.selected_fields is set, the - // schema may be different from the table schema as it will only contain - // the selected fields. - oneof schema { - // Avro schema. - AvroSchema avro_schema = 5; - - // Arrow schema. - ArrowSchema arrow_schema = 6; - } - - // Streams associated with this session. - repeated Stream streams = 4; - - // Table that this ReadSession is reading from. - TableReference table_reference = 7; - - // Any modifiers which are applied when reading from the specified table. - TableModifiers table_modifiers = 8; - - // The strategy to use for distributing data among the streams. - ShardingStrategy sharding_strategy = 9; -} - -// Creates a new read session, which may include additional options such as -// requested parallelism, projection filters and constraints. -message CreateReadSessionRequest { - // Required. Reference to the table to read. - TableReference table_reference = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. String of the form `projects/{project_id}` indicating the - // project this ReadSession is associated with. This is the project that will - // be billed for usage. - string parent = 6 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Any modifiers to the Table (e.g. snapshot timestamp). - TableModifiers table_modifiers = 2; - - // Initial number of streams. If unset or 0, we will - // provide a value of streams so as to produce reasonable throughput. Must be - // non-negative. The number of streams may be lower than the requested number, - // depending on the amount of parallelism that is reasonable for the table and - // the maximum amount of parallelism allowed by the system. - // - // Streams must be read starting from offset 0. - int32 requested_streams = 3; - - // Read options for this session (e.g. column selection, filters). - TableReadOptions read_options = 4; - - // Data output format. Currently defaults to Avro. - DataFormat format = 5; - - // The strategy to use for distributing data among multiple streams.
Currently - // defaults to liquid sharding. - ShardingStrategy sharding_strategy = 7; -} - -// Data format for input or output data. -enum DataFormat { - // Data format is unspecified. - DATA_FORMAT_UNSPECIFIED = 0; - - // Avro is a standard open source row based file format. - // See https://avro.apache.org/ for more details. - AVRO = 1; - - ARROW = 3; -} - -// Strategy for distributing data among multiple streams in a read session. -enum ShardingStrategy { - // Same as LIQUID. - SHARDING_STRATEGY_UNSPECIFIED = 0; - - // Assigns data to each stream based on the client's read rate. The faster the - // client reads from a stream, the more data is assigned to the stream. In - // this strategy, it's possible to read all data from a single stream even if - // there are other streams present. - LIQUID = 1; - - // Assigns data to each stream such that roughly the same number of rows can - // be read from each stream. Because the server-side unit for assigning data - // is collections of rows, the API does not guarantee that each stream will - // return the same number of rows. Additionally, the limits are enforced based - // on the number of pre-filtering rows, so some filters can lead to lopsided - // assignments. - BALANCED = 2; -} - -// Requesting row data via `ReadRows` must provide Stream position information. -message ReadRowsRequest { - // Required. Identifier of the position in the stream to start reading from. - // The offset requested must be less than the last row read from ReadRows. - // Requesting a larger offset is undefined. - StreamPosition read_position = 1 [(google.api.field_behavior) = REQUIRED]; -} - -// Progress information for a given Stream. -message StreamStatus { - // Number of estimated rows in the current stream. May change over time as - // different readers in the stream progress at rates which are relatively fast - // or slow. - int64 estimated_row_count = 1; - - // A value in the range [0.0, 1.0] that represents the fraction of rows - // assigned to this stream that have been processed by the server. In the - // presence of read filters, the server may process more rows than it returns, - // so this value reflects progress through the pre-filtering rows. - // - // This value is only populated for sessions created through the BALANCED - // sharding strategy. - float fraction_consumed = 2; - - // Represents the progress of the current stream. - Progress progress = 4; - - // Whether this stream can be split. For sessions that use the LIQUID sharding - // strategy, this value is always false. For BALANCED sessions, this value is - // false when enough data have been read such that no more splits are possible - // at that point or beyond. For small tables or streams that are the result of - // a chain of splits, this value may never be true. - bool is_splittable = 3; -} - -message Progress { - // The fraction of rows assigned to the stream that have been processed by the - // server so far, not including the rows in the current response message. - // - // This value, along with `at_response_end`, can be used to interpolate the - // progress made as the rows in the message are being processed using the - // following formula: `at_response_start + (at_response_end - - // at_response_start) * rows_processed_from_response / rows_in_response`. - // - // Note that if a filter is provided, the `at_response_end` value of the - // previous response may not necessarily be equal to the `at_response_start` - // value of the current response.
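Spelled out, the interpolation formula in the comment above behaves like this. A small illustrative helper; the parameter names mirror the proto fields and are not part of any generated surface.

function interpolateProgress(
  atResponseStart: number,
  atResponseEnd: number,
  rowsProcessedFromResponse: number,
  rowsInResponse: number
): number {
  // at_response_start + (at_response_end - at_response_start)
  //   * rows_processed_from_response / rows_in_response
  return (
    atResponseStart +
    ((atResponseEnd - atResponseStart) * rowsProcessedFromResponse) /
      rowsInResponse
  );
}

// Example: a response spans [0.30, 0.40] of the stream; after processing 500
// of its 1000 rows, interpolateProgress(0.30, 0.40, 500, 1000) yields 0.35.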
- float at_response_start = 1; - - // Similar to `at_response_start`, except that this value includes the rows in - // the current response. - float at_response_end = 2; -} - -// Information on whether the current connection is being throttled. -message ThrottleStatus { - // How much this connection is being throttled. - // 0 is no throttling, 100 is completely throttled. - int32 throttle_percent = 1; -} - -// Response from calling `ReadRows` may include row data, progress and -// throttling information. -message ReadRowsResponse { - // Row data is returned in format specified during session creation. - oneof rows { - // Serialized row data in AVRO format. - AvroRows avro_rows = 3; - - // Serialized row data in Arrow RecordBatch format. - ArrowRecordBatch arrow_record_batch = 4; - } - - // Number of serialized rows in the rows block. This value is recorded here, - // in addition to the row_count values in the output-specific messages in - // `rows`, so that code which needs to record progress through the stream can - // do so in an output format-independent way. - int64 row_count = 6; - - // Estimated stream statistics. - StreamStatus status = 2; - - // Throttling status. If unset, the latest response still describes - // the current throttling status. - ThrottleStatus throttle_status = 5; -} - -// Information needed to request additional streams for an established read -// session. -message BatchCreateReadSessionStreamsRequest { - // Required. Must be a non-expired session obtained from a call to - // CreateReadSession. Only the name field needs to be set. - ReadSession session = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. Number of new streams requested. Must be positive. - // Number of added streams may be less than this, see CreateReadSessionRequest - // for more information. - int32 requested_streams = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The response from `BatchCreateReadSessionStreams` returns the stream -// identifiers for the newly created streams. -message BatchCreateReadSessionStreamsResponse { - // Newly added streams. - repeated Stream streams = 1; -} - -// Request information for invoking `FinalizeStream`. -message FinalizeStreamRequest { - // Required. Stream to finalize. - Stream stream = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Request information for `SplitReadStream`. -message SplitReadStreamRequest { - // Required. Stream to split. - Stream original_stream = 1 [(google.api.field_behavior) = REQUIRED]; - - // A value in the range (0.0, 1.0) that specifies the fractional point at - // which the original stream should be split. The actual split point is - // evaluated on pre-filtered rows, so if a filter is provided, then there is - // no guarantee that the division of the rows between the new child streams - // will be proportional to this fractional value. Additionally, because the - // server-side unit for assigning data is collections of rows, this fraction - // will always map to a data storage boundary on the server side. - float fraction = 2; -} - -// Response from `SplitReadStream`. -message SplitReadStreamResponse { - // Primary stream, which contains the beginning portion of - // |original_stream|. An empty value indicates that the original stream can no - // longer be split. - Stream primary_stream = 1; - - // Remainder stream, which contains the tail of |original_stream|. An empty - // value indicates that the original stream can no longer be split.
- Stream remainder_stream = 2; -} diff --git a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto b/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto deleted file mode 100644 index 4269392f..00000000 --- a/owl-bot-staging/v1beta1/protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1beta1; - -import "google/api/resource.proto"; - -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"; -option java_outer_classname = "TableReferenceProto"; -option java_package = "com.google.cloud.bigquery.storage.v1beta1"; - -// Table reference that includes just the 3 strings needed to identify a table. -message TableReference { - // The assigned project ID of the project. - string project_id = 1; - - // The ID of the dataset in the above project. - string dataset_id = 2; - - // The ID of the table in the above dataset. - string table_id = 3; -} - -// All fields in this message are optional. -message TableModifiers { - // The snapshot time of the table. If not set, interpreted as now. - google.protobuf.Timestamp snapshot_time = 1; -} diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js deleted file mode 100644 index 3d7b6d3e..00000000 --- a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(session, requestedStreams) { - // [START bigquerystorage_v1beta1_generated_BigQueryStorage_BatchCreateReadSessionStreams_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Must be a non-expired session obtained from a call to - * CreateReadSession. Only the name field needs to be set. - */ - // const session = {} - /** - * Required.
Number of new streams requested. Must be positive. - * Number of added streams may be less than this, see CreateReadSessionRequest - * for more information. - */ - // const requestedStreams = 1234 - - // Imports the Storage library - const {BigQueryStorageClient} = require('@google-cloud/bigquery-storage').v1beta1; - - // Instantiates a client - const storageClient = new BigQueryStorageClient(); - - async function callBatchCreateReadSessionStreams() { - // Construct request - const request = { - session, - requestedStreams, - }; - - // Run request - const response = await storageClient.batchCreateReadSessionStreams(request); - console.log(response); - } - - callBatchCreateReadSessionStreams(); - // [END bigquerystorage_v1beta1_generated_BigQueryStorage_BatchCreateReadSessionStreams_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.create_read_session.js b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.create_read_session.js deleted file mode 100644 index c71a464c..00000000 --- a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.create_read_session.js +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(tableReference, parent) { - // [START bigquerystorage_v1beta1_generated_BigQueryStorage_CreateReadSession_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Reference to the table to read. - */ - // const tableReference = {} - /** - * Required. String of the form `projects/{project_id}` indicating the - * project this ReadSession is associated with. This is the project that will - * be billed for usage. - */ - // const parent = 'abc123' - /** - * Any modifiers to the Table (e.g. snapshot timestamp). - */ - // const tableModifiers = {} - /** - * Initial number of streams. If unset or 0, we will - * provide a value of streams so as to produce reasonable throughput. Must be - * non-negative. The number of streams may be lower than the requested number, - * depending on the amount of parallelism that is reasonable for the table and - * the maximum amount of parallelism allowed by the system. - * Streams must be read starting from offset 0. - */ - // const requestedStreams = 1234 - /** - * Read options for this session (e.g. column selection, filters). - */ - // const readOptions = {} - /** - * Data output format. Currently defaults to Avro. - */ - // const format = {} - /** - * The strategy to use for distributing data among multiple streams. Currently - * defaults to liquid sharding.
- */ - // const shardingStrategy = {} - - // Imports the Storage library - const {BigQueryStorageClient} = require('@google-cloud/bigquery-storage').v1beta1; - - // Instantiates a client - const storageClient = new BigQueryStorageClient(); - - async function callCreateReadSession() { - // Construct request - const request = { - tableReference, - parent, - }; - - // Run request - const response = await storageClient.createReadSession(request); - console.log(response); - } - - callCreateReadSession(); - // [END bigquerystorage_v1beta1_generated_BigQueryStorage_CreateReadSession_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.finalize_stream.js b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.finalize_stream.js deleted file mode 100644 index 51d63d0b..00000000 --- a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.finalize_stream.js +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(stream) { - // [START bigquerystorage_v1beta1_generated_BigQueryStorage_FinalizeStream_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Stream to finalize. - */ - // const stream = {} - - // Imports the Storage library - const {BigQueryStorageClient} = require('@google-cloud/bigquery-storage').v1beta1; - - // Instantiates a client - const storageClient = new BigQueryStorageClient(); - - async function callFinalizeStream() { - // Construct request - const request = { - stream, - }; - - // Run request - const response = await storageClient.finalizeStream(request); - console.log(response); - } - - callFinalizeStream(); - // [END bigquerystorage_v1beta1_generated_BigQueryStorage_FinalizeStream_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.read_rows.js b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.read_rows.js deleted file mode 100644 index 65924b17..00000000 --- a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.read_rows.js +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(readPosition) { - // [START bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Identifier of the position in the stream to start reading from. - * The offset requested must be less than the last row read from ReadRows. - * Requesting a larger offset is undefined. - */ - // const readPosition = {} - - // Imports the Storage library - const {BigQueryStorageClient} = require('@google-cloud/bigquery-storage').v1beta1; - - // Instantiates a client - const storageClient = new BigQueryStorageClient(); - - async function callReadRows() { - // Construct request - const request = { - readPosition, - }; - - // Run request - const stream = await storageClient.readRows(request); - stream.on('data', (response) => { console.log(response) }); - stream.on('error', (err) => { throw(err) }); - stream.on('end', () => { /* API call completed */ }); - } - - callReadRows(); - // [END bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.split_read_stream.js b/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.split_read_stream.js deleted file mode 100644 index 24ae597d..00000000 --- a/owl-bot-staging/v1beta1/samples/generated/v1beta1/big_query_storage.split_read_stream.js +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(originalStream) { - // [START bigquerystorage_v1beta1_generated_BigQueryStorage_SplitReadStream_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Required. Stream to split. - */ - // const originalStream = {} - /** - * A value in the range (0.0, 1.0) that specifies the fractional point at - * which the original stream should be split. 
The actual split point is - * evaluated on pre-filtered rows, so if a filter is provided, then there is - * no guarantee that the division of the rows between the new child streams - * will be proportional to this fractional value. Additionally, because the - * server-side unit for assigning data is collections of rows, this fraction - * will always map to a data storage boundary on the server side. - */ - // const fraction = 1234 - - // Imports the Storage library - const {BigQueryStorageClient} = require('@google-cloud/bigquery-storage').v1beta1; - - // Instantiates a client - const storageClient = new BigQueryStorageClient(); - - async function callSplitReadStream() { - // Construct request - const request = { - originalStream, - }; - - // Run request - const response = await storageClient.splitReadStream(request); - console.log(response); - } - - callSplitReadStream(); - // [END bigquerystorage_v1beta1_generated_BigQueryStorage_SplitReadStream_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json b/owl-bot-staging/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json deleted file mode 100644 index a36b0694..00000000 --- a/owl-bot-staging/v1beta1/samples/generated/v1beta1/snippet_metadata.google.cloud.bigquery.storage.v1beta1.json +++ /dev/null @@ -1,247 +0,0 @@ -{ - "clientLibrary": { - "name": "nodejs-storage", - "version": "0.1.0", - "language": "TYPESCRIPT", - "apis": [ - { - "id": "google.cloud.bigquery.storage.v1beta1", - "version": "v1beta1" - } - ] - }, - "snippets": [ - { - "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_CreateReadSession_async", - "title": "BigQueryStorage createReadSession Sample", - "origin": "API_DEFINITION", - "description": " Creates a new read session. A read session divides the contents of a BigQuery table into one or more streams, which can then be used to read data from the table. The read session also specifies properties of the data to be read, such as a list of columns or a push-down filter describing the rows to be returned. A particular row can be read by at most one stream. When the caller has reached the end of each stream in the session, then all the data in the table has been read.
Read sessions automatically expire 24 hours after they are created and do not require manual clean-up by the caller.", - "canonical": true, - "file": "big_query_storage.create_read_session.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 83, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "CreateReadSession", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.CreateReadSession", - "async": true, - "parameters": [ - { - "name": "table_reference", - "type": ".google.cloud.bigquery.storage.v1beta1.TableReference" - }, - { - "name": "parent", - "type": "TYPE_STRING" - }, - { - "name": "table_modifiers", - "type": ".google.cloud.bigquery.storage.v1beta1.TableModifiers" - }, - { - "name": "requested_streams", - "type": "TYPE_INT32" - }, - { - "name": "read_options", - "type": ".google.cloud.bigquery.storage.v1beta1.TableReadOptions" - }, - { - "name": "format", - "type": ".google.cloud.bigquery.storage.v1beta1.DataFormat" - }, - { - "name": "sharding_strategy", - "type": ".google.cloud.bigquery.storage.v1beta1.ShardingStrategy" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1beta1.ReadSession", - "client": { - "shortName": "BigQueryStorageClient", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" - }, - "method": { - "shortName": "CreateReadSession", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.CreateReadSession", - "service": { - "shortName": "BigQueryStorage", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async", - "title": "BigQueryStorage readRows Sample", - "origin": "API_DEFINITION", - "description": " Reads rows from the table in the format prescribed by the read session. Each response contains one or more table rows, up to a maximum of 10 MiB per response; read requests which attempt to read individual rows larger than this will fail. Each request also returns a set of stream statistics reflecting the estimated total number of rows in the read stream. This number is computed based on the total table size and the number of active streams in the read session, and may change as other streams continue to read data.", - "canonical": true, - "file": "big_query_storage.read_rows.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 54, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "ReadRows", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.ReadRows", - "async": true, - "parameters": [ - { - "name": "read_position", - "type": ".google.cloud.bigquery.storage.v1beta1.StreamPosition" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1beta1.ReadRowsResponse", - "client": { - "shortName": "BigQueryStorageClient", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" - }, - "method": { - "shortName": "ReadRows", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.ReadRows", - "service": { - "shortName": "BigQueryStorage", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_BatchCreateReadSessionStreams_async", - "title": "BigQueryStorage batchCreateReadSessionStreams Sample", - "origin": "API_DEFINITION", - "description": " Creates additional streams for a ReadSession. 
This API can be used to dynamically adjust the parallelism of a batch processing task upwards by adding additional workers.", - "canonical": true, - "file": "big_query_storage.batch_create_read_session_streams.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 58, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "BatchCreateReadSessionStreams", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.BatchCreateReadSessionStreams", - "async": true, - "parameters": [ - { - "name": "session", - "type": ".google.cloud.bigquery.storage.v1beta1.ReadSession" - }, - { - "name": "requested_streams", - "type": "TYPE_INT32" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse", - "client": { - "shortName": "BigQueryStorageClient", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" - }, - "method": { - "shortName": "BatchCreateReadSessionStreams", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.BatchCreateReadSessionStreams", - "service": { - "shortName": "BigQueryStorage", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_FinalizeStream_async", - "title": "BigQueryStorage finalizeStream Sample", - "origin": "API_DEFINITION", - "description": " Triggers the graceful termination of a single stream in a ReadSession. This API can be used to dynamically adjust the parallelism of a batch processing task downwards without losing data. This API does not delete the stream -- it remains visible in the ReadSession, and any data processed by the stream is not released to other streams. However, no additional data will be assigned to the stream once this call completes. Callers must continue reading data on the stream until the end of the stream is reached so that data which has already been assigned to the stream will be processed. This method will return an error if there are no other live streams in the Session, or if SplitReadStream() has been called on the given Stream.", - "canonical": true, - "file": "big_query_storage.finalize_stream.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 50, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "FinalizeStream", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.FinalizeStream", - "async": true, - "parameters": [ - { - "name": "stream", - "type": ".google.cloud.bigquery.storage.v1beta1.Stream" - } - ], - "resultType": ".google.protobuf.Empty", - "client": { - "shortName": "BigQueryStorageClient", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" - }, - "method": { - "shortName": "FinalizeStream", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.FinalizeStream", - "service": { - "shortName": "BigQueryStorage", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" - } - } - } - }, - { - "regionTag": "bigquerystorage_v1beta1_generated_BigQueryStorage_SplitReadStream_async", - "title": "BigQueryStorage splitReadStream Sample", - "origin": "API_DEFINITION", - "description": " Splits a given read stream into two Streams. These streams are referred to as the primary and the residual of the split. The original stream can still be read from in the same manner as before. 
Both of the returned streams can also be read from, and the total rows returned by both child streams will be the same as the rows read from the original stream. Moreover, the two child streams will be allocated back to back in the original Stream. Concretely, it is guaranteed that for streams Original, Primary, and Residual, that Original[0-j] = Primary[0-j] and Original[j-n] = Residual[0-m] once the streams have been read to completion. This method is guaranteed to be idempotent.", - "canonical": true, - "file": "big_query_storage.split_read_stream.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 60, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "SplitReadStream", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.SplitReadStream", - "async": true, - "parameters": [ - { - "name": "original_stream", - "type": ".google.cloud.bigquery.storage.v1beta1.Stream" - }, - { - "name": "fraction", - "type": "TYPE_FLOAT" - } - ], - "resultType": ".google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse", - "client": { - "shortName": "BigQueryStorageClient", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient" - }, - "method": { - "shortName": "SplitReadStream", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage.SplitReadStream", - "service": { - "shortName": "BigQueryStorage", - "fullName": "google.cloud.bigquery.storage.v1beta1.BigQueryStorage" - } - } - } - } - ] -} diff --git a/owl-bot-staging/v1beta1/src/index.ts b/owl-bot-staging/v1beta1/src/index.ts deleted file mode 100644 index 96c87b19..00000000 --- a/owl-bot-staging/v1beta1/src/index.ts +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as v1beta1 from './v1beta1'; -const BigQueryStorageClient = v1beta1.BigQueryStorageClient; -type BigQueryStorageClient = v1beta1.BigQueryStorageClient; -export {v1beta1, BigQueryStorageClient}; -export default {v1beta1, BigQueryStorageClient}; -import * as protos from '../protos/protos'; -export {protos} diff --git a/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client.ts b/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client.ts deleted file mode 100644 index a1f8bb9c..00000000 --- a/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client.ts +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions, GoogleError} from 'google-gax'; - -import { PassThrough } from 'stream'; -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1beta1/big_query_storage_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './big_query_storage_client_config.json'; - -const version = require('../../../package.json').version; - -/** - * BigQuery storage API. - * - * The BigQuery storage API can be used to read data stored in BigQuery. - * @class - * @memberof v1beta1 - */ -export class BigQueryStorageClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - pathTemplates: {[name: string]: gax.PathTemplate}; - bigQueryStorageStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of BigQueryStorageClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID.
If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean} [options.fallback] - Use HTTP fallback mode. - * In fallback mode, a special browser-compatible transport implementation is used - * instead of gRPC transport. In browser context (if the `window` object is defined) - * the fallback mode is enabled automatically; set `options.fallback` to `false` - * if you need to override this behavior. - */ - constructor(opts?: ClientOptions) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof BigQueryStorageClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gax.fallback : gax; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // This API contains "path templates"; forward-slash-separated - // identifiers to uniquely identify resources within the API. - // Create useful helper objects for these. 
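The templates registered just below back the client's generated path helpers. A sketch of typical usage; the helper names (streamPath, matchStreamFromStreamName) follow the standard gapic pattern and are assumed here rather than shown in this hunk.

import {v1beta1} from '@google-cloud/bigquery-storage';

const client = new v1beta1.BigQueryStorageClient();
// Render a full resource name from its parts...
const name = client.streamPath('my-project', 'us', 'my-stream');
// => 'projects/my-project/locations/us/streams/my-stream'
// ...and parse a component back out of a resource name.
const streamId = client.matchStreamFromStreamName(name);
// => 'my-stream'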
- this.pathTemplates = { - projectPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}' - ), - readSessionPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/sessions/{session}' - ), - streamPathTemplate: new this._gaxModule.PathTemplate( - 'projects/{project}/locations/{location}/streams/{stream}' - ), - }; - - // Some of the methods on this service provide streaming responses. - // Provide descriptors for these. - this.descriptors.stream = { - readRows: new this._gaxModule.StreamDescriptor(gax.StreamType.SERVER_STREAMING, opts.fallback === 'rest') - }; - - // Put together the default options sent with requests. - this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.bigquery.storage.v1beta1.BigQueryStorage', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = gax.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.bigQueryStorageStub) { - return this.bigQueryStorageStub; - } - - // Put together the "service stub" for - // google.cloud.bigquery.storage.v1beta1.BigQueryStorage. - this.bigQueryStorageStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.storage.v1beta1.BigQueryStorage') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.bigquery.storage.v1beta1.BigQueryStorage, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
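The termination guard wired up just below gives calls on a closed client a consistent failure mode. A sketch of what callers observe, assuming the generated close() method (not shown in this hunk) is what sets _terminated.

import {v1beta1} from '@google-cloud/bigquery-storage';

async function afterClose(client: v1beta1.BigQueryStorageClient) {
  await client.close();
  // Streaming methods still return a stream, which then emits the error...
  client
    .readRows({readPosition: {offset: 0}})
    .on('error', err => console.error(err.message));
  // ...while unary methods reject: 'The client has already been closed.'
  await client
    .finalizeStream({stream: {name: 'projects/p/locations/l/streams/s'}})
    .catch(err => console.error(err));
}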
- const bigQueryStorageStubMethods = - ['createReadSession', 'readRows', 'batchCreateReadSessionStreams', 'finalizeStream', 'splitReadStream']; - for (const methodName of bigQueryStorageStubMethods) { - const callPromise = this.bigQueryStorageStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - if (methodName in this.descriptors.stream) { - const stream = new PassThrough(); - setImmediate(() => { - stream.emit('error', new GoogleError('The client has already been closed.')); - }); - return stream; - } - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.stream[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.bigQueryStorageStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'bigquerystorage.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'bigquerystorage.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/bigquery', - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise<string>; - getProjectId(callback: Callback<string, undefined, undefined>): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. - */ - getProjectId(callback?: Callback<string, undefined, undefined>): - Promise<string>|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- -/** - * Creates a new read session. A read session divides the contents of a - * BigQuery table into one or more streams, which can then be used to read - * data from the table. The read session also specifies properties of the - * data to be read, such as a list of columns or a push-down filter describing - * the rows to be returned. - * - * A particular row can be read by at most one stream. When the caller has - * reached the end of each stream in the session, then all the data in the - * table has been read. - * - * Read sessions automatically expire 24 hours after they are created and do - * not require manual clean-up by the caller. - * - * @param {Object} request - * The request object that will be sent. - * @param {google.cloud.bigquery.storage.v1beta1.TableReference} request.tableReference - * Required. Reference to the table to read. - * @param {string} request.parent - * Required. String of the form `projects/{project_id}` indicating the - * project this ReadSession is associated with. This is the project that will - * be billed for usage. - * @param {google.cloud.bigquery.storage.v1beta1.TableModifiers} request.tableModifiers - * Any modifiers to the Table (e.g. snapshot timestamp).
- * @param {number} request.requestedStreams - * Initial number of streams. If unset or 0, we will - * provide a value of streams so as to produce reasonable throughput. Must be - * non-negative. The number of streams may be lower than the requested number, - * depending on the amount of parallelism that is reasonable for the table and - * the maximum amount of parallelism allowed by the system. - * - * Streams must be read starting from offset 0. - * @param {google.cloud.bigquery.storage.v1beta1.TableReadOptions} request.readOptions - * Read options for this session (e.g. column selection, filters). - * @param {google.cloud.bigquery.storage.v1beta1.DataFormat} request.format - * Data output format. Currently defaults to Avro. - * @param {google.cloud.bigquery.storage.v1beta1.ShardingStrategy} request.shardingStrategy - * The strategy to use for distributing data among multiple streams. Currently - * defaults to liquid sharding. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [ReadSession]{@link google.cloud.bigquery.storage.v1beta1.ReadSession}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. - * @example include:samples/generated/v1beta1/big_query_storage.create_read_session.js - * region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_CreateReadSession_async - */ - createReadSession( - request?: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.bigquery.storage.v1beta1.IReadSession, - protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|undefined, {}|undefined - ]>; - createReadSession( - request: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.bigquery.storage.v1beta1.IReadSession, - protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|null|undefined, - {}|null|undefined>): void; - createReadSession( - request: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, - callback: Callback< - protos.google.cloud.bigquery.storage.v1beta1.IReadSession, - protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|null|undefined, - {}|null|undefined>): void; - createReadSession( - request?: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.bigquery.storage.v1beta1.IReadSession, - protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.bigquery.storage.v1beta1.IReadSession, - protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.bigquery.storage.v1beta1.IReadSession, - protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options ||
{}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'table_reference.project_id': request.tableReference!.projectId || '', - 'table_reference.dataset_id': request.tableReference!.datasetId || '', - }); - this.initialize(); - return this.innerApiCalls.createReadSession(request, options, callback); - } -/** - * Creates additional streams for a ReadSession. This API can be used to - * dynamically adjust the parallelism of a batch processing task upwards by - * adding additional workers. - * - * @param {Object} request - * The request object that will be sent. - * @param {google.cloud.bigquery.storage.v1beta1.ReadSession} request.session - * Required. Must be a non-expired session obtained from a call to - * CreateReadSession. Only the name field needs to be set. - * @param {number} request.requestedStreams - * Required. Number of new streams requested. Must be positive. - * Number of added streams may be less than this, see CreateReadSessionRequest - * for more information. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [BatchCreateReadSessionStreamsResponse]{@link google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. - * @example include:samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js - * region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_BatchCreateReadSessionStreams_async - */ - batchCreateReadSessionStreams( - request?: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|undefined, {}|undefined - ]>; - batchCreateReadSessionStreams( - request: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|null|undefined, - {}|null|undefined>): void; - batchCreateReadSessionStreams( - request: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, - callback: Callback< - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|null|undefined, - {}|null|undefined>): void; - batchCreateReadSessionStreams( - request?: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, - 
protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, - protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'session.name': request.session!.name || '', - }); - this.initialize(); - return this.innerApiCalls.batchCreateReadSessionStreams(request, options, callback); - } -/** - * Triggers the graceful termination of a single stream in a ReadSession. This - * API can be used to dynamically adjust the parallelism of a batch processing - * task downwards without losing data. - * - * This API does not delete the stream -- it remains visible in the - * ReadSession, and any data processed by the stream is not released to other - * streams. However, no additional data will be assigned to the stream once - * this call completes. Callers must continue reading data on the stream until - * the end of the stream is reached so that data which has already been - * assigned to the stream will be processed. - * - * This method will return an error if there are no other live streams - * in the Session, or if SplitReadStream() has been called on the given - * Stream. - * - * @param {Object} request - * The request object that will be sent. - * @param {google.cloud.bigquery.storage.v1beta1.Stream} request.stream - * Required. Stream to finalize. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. 
- * @example include:samples/generated/v1beta1/big_query_storage.finalize_stream.js - * region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_FinalizeStream_async - */ - finalizeStream( - request?: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, - options?: CallOptions): - Promise<[ - protos.google.protobuf.IEmpty, - protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|undefined, {}|undefined - ]>; - finalizeStream( - request: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, - options: CallOptions, - callback: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|null|undefined, - {}|null|undefined>): void; - finalizeStream( - request: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, - callback: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|null|undefined, - {}|null|undefined>): void; - finalizeStream( - request?: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.protobuf.IEmpty, - protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.protobuf.IEmpty, - protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'stream.name': request.stream!.name || '', - }); - this.initialize(); - return this.innerApiCalls.finalizeStream(request, options, callback); - } -/** - * Splits a given read stream into two Streams. These streams are referred to - * as the primary and the residual of the split. The original stream can still - * be read from in the same manner as before. Both of the returned streams can - * also be read from, and the total rows return by both child streams will be - * the same as the rows read from the original stream. - * - * Moreover, the two child streams will be allocated back to back in the - * original Stream. Concretely, it is guaranteed that for streams Original, - * Primary, and Residual, that Original[0-j] = Primary[0-j] and - * Original[j-n] = Residual[0-m] once the streams have been read to - * completion. - * - * This method is guaranteed to be idempotent. - * - * @param {Object} request - * The request object that will be sent. - * @param {google.cloud.bigquery.storage.v1beta1.Stream} request.originalStream - * Required. Stream to split. - * @param {number} request.fraction - * A value in the range (0.0, 1.0) that specifies the fractional point at - * which the original stream should be split. The actual split point is - * evaluated on pre-filtered rows, so if a filter is provided, then there is - * no guarantee that the division of the rows between the new child streams - * will be proportional to this fractional value. 
Additionally, because the - * server-side unit for assigning data is collections of rows, this fraction - * will always map to to a data storage boundary on the server side. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [SplitReadStreamResponse]{@link google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse}. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods) - * for more details and examples. - * @example include:samples/generated/v1beta1/big_query_storage.split_read_stream.js - * region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_SplitReadStream_async - */ - splitReadStream( - request?: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, - options?: CallOptions): - Promise<[ - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|undefined, {}|undefined - ]>; - splitReadStream( - request: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, - options: CallOptions, - callback: Callback< - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|null|undefined, - {}|null|undefined>): void; - splitReadStream( - request: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, - callback: Callback< - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|null|undefined, - {}|null|undefined>): void; - splitReadStream( - request?: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, - optionsOrCallback?: CallOptions|Callback< - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|null|undefined, - {}|null|undefined>, - callback?: Callback< - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|null|undefined, - {}|null|undefined>): - Promise<[ - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, - protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'original_stream.name': request.originalStream!.name || '', - }); - this.initialize(); - return this.innerApiCalls.splitReadStream(request, options, callback); - } - -/** - * Reads rows from the table in the format prescribed by the read session. - * Each response contains one or more table rows, up to a maximum of 10 MiB - * per response; read requests which attempt to read individual rows larger - * than this will fail. 
- * - * Each request also returns a set of stream statistics reflecting the - * estimated total number of rows in the read stream. This number is computed - * based on the total table size and the number of active streams in the read - * session, and may change as other streams continue to read data. - * - * @param {Object} request - * The request object that will be sent. - * @param {google.cloud.bigquery.storage.v1beta1.StreamPosition} request.readPosition - * Required. Identifier of the position in the stream to start reading from. - * The offset requested must be less than the last row read from ReadRows. - * Requesting a larger offset is undefined. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Stream} - * An object stream which emits [ReadRowsResponse]{@link google.cloud.bigquery.storage.v1beta1.ReadRowsResponse} on 'data' event. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#server-streaming) - * for more details and examples. - * @example include:samples/generated/v1beta1/big_query_storage.read_rows.js - * region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async - */ - readRows( - request?: protos.google.cloud.bigquery.storage.v1beta1.IReadRowsRequest, - options?: CallOptions): - gax.CancellableStream{ - request = request || {}; - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - options.otherArgs.headers[ - 'x-goog-request-params' - ] = gax.routingHeader.fromParams({ - 'read_position.stream.name': request.readPosition!.stream!.name || '', - }); - this.initialize(); - return this.innerApiCalls.readRows(request, options); - } - - // -------------------- - // -- Path templates -- - // -------------------- - - /** - * Return a fully-qualified project resource name string. - * - * @param {string} project - * @returns {string} Resource name string. - */ - projectPath(project:string) { - return this.pathTemplates.projectPathTemplate.render({ - project: project, - }); - } - - /** - * Parse the project from Project resource. - * - * @param {string} projectName - * A fully-qualified path representing Project resource. - * @returns {string} A string representing the project. - */ - matchProjectFromProjectName(projectName: string) { - return this.pathTemplates.projectPathTemplate.match(projectName).project; - } - - /** - * Return a fully-qualified readSession resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} session - * @returns {string} Resource name string. - */ - readSessionPath(project:string,location:string,session:string) { - return this.pathTemplates.readSessionPathTemplate.render({ - project: project, - location: location, - session: session, - }); - } - - /** - * Parse the project from ReadSession resource. - * - * @param {string} readSessionName - * A fully-qualified path representing ReadSession resource. - * @returns {string} A string representing the project. - */ - matchProjectFromReadSessionName(readSessionName: string) { - return this.pathTemplates.readSessionPathTemplate.match(readSessionName).project; - } - - /** - * Parse the location from ReadSession resource. - * - * @param {string} readSessionName - * A fully-qualified path representing ReadSession resource. - * @returns {string} A string representing the location. 
- */ - matchLocationFromReadSessionName(readSessionName: string) { - return this.pathTemplates.readSessionPathTemplate.match(readSessionName).location; - } - - /** - * Parse the session from ReadSession resource. - * - * @param {string} readSessionName - * A fully-qualified path representing ReadSession resource. - * @returns {string} A string representing the session. - */ - matchSessionFromReadSessionName(readSessionName: string) { - return this.pathTemplates.readSessionPathTemplate.match(readSessionName).session; - } - - /** - * Return a fully-qualified stream resource name string. - * - * @param {string} project - * @param {string} location - * @param {string} stream - * @returns {string} Resource name string. - */ - streamPath(project:string,location:string,stream:string) { - return this.pathTemplates.streamPathTemplate.render({ - project: project, - location: location, - stream: stream, - }); - } - - /** - * Parse the project from Stream resource. - * - * @param {string} streamName - * A fully-qualified path representing Stream resource. - * @returns {string} A string representing the project. - */ - matchProjectFromStreamName(streamName: string) { - return this.pathTemplates.streamPathTemplate.match(streamName).project; - } - - /** - * Parse the location from Stream resource. - * - * @param {string} streamName - * A fully-qualified path representing Stream resource. - * @returns {string} A string representing the location. - */ - matchLocationFromStreamName(streamName: string) { - return this.pathTemplates.streamPathTemplate.match(streamName).location; - } - - /** - * Parse the stream from Stream resource. - * - * @param {string} streamName - * A fully-qualified path representing Stream resource. - * @returns {string} A string representing the stream. - */ - matchStreamFromStreamName(streamName: string) { - return this.pathTemplates.streamPathTemplate.match(streamName).stream; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise<void> { - if (this.bigQueryStorageStub && !this._terminated) { - return this.bigQueryStorageStub.then(stub => { - this._terminated = true; - stub.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client_config.json b/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client_config.json deleted file mode 100644 index 003cb084..00000000 --- a/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_client_config.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "interfaces": { - "google.cloud.bigquery.storage.v1beta1.BigQueryStorage": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ], - "unavailable": [ - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "CreateReadSession": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - }, - "ReadRows": { - "timeout_millis": 86400000, - "retry_codes_name": "unavailable", - "retry_params_name": "default" - }, - "BatchCreateReadSessionStreams": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - }, - "FinalizeStream": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - }, - "SplitReadStream": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - } - } - } - } -} diff --git a/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_proto_list.json b/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_proto_list.json deleted file mode 100644 index 0b801075..00000000 --- a/owl-bot-staging/v1beta1/src/v1beta1/big_query_storage_proto_list.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - "../../protos/google/cloud/bigquery/storage/v1beta1/arrow.proto", - "../../protos/google/cloud/bigquery/storage/v1beta1/avro.proto", - "../../protos/google/cloud/bigquery/storage/v1beta1/read_options.proto", - "../../protos/google/cloud/bigquery/storage/v1beta1/storage.proto", - "../../protos/google/cloud/bigquery/storage/v1beta1/table_reference.proto" -] diff --git a/owl-bot-staging/v1beta1/src/v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/src/v1beta1/gapic_metadata.json deleted file mode 100644 index 00d888bf..00000000 --- a/owl-bot-staging/v1beta1/src/v1beta1/gapic_metadata.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "typescript", - "protoPackage": "google.cloud.bigquery.storage.v1beta1", - "libraryPackage": "@google-cloud/bigquery-storage", - "services": { - "BigQueryStorage": { - "clients": { - "grpc": { - "libraryClient": "BigQueryStorageClient", - "rpcs": { - "CreateReadSession": { - "methods": [ - "createReadSession" - ] - }, - "BatchCreateReadSessionStreams": { - "methods": [ - "batchCreateReadSessionStreams" - ] - }, - "FinalizeStream": { - "methods": [ - "finalizeStream" - ] - }, - "SplitReadStream": { - "methods": [ - "splitReadStream" - ] - }, - "ReadRows": { - "methods": [ - "readRows" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "BigQueryStorageClient", - "rpcs": { - "CreateReadSession": { - "methods": [ - "createReadSession" - ] - },
- "BatchCreateReadSessionStreams": { - "methods": [ - "batchCreateReadSessionStreams" - ] - }, - "FinalizeStream": { - "methods": [ - "finalizeStream" - ] - }, - "SplitReadStream": { - "methods": [ - "splitReadStream" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1beta1/src/v1beta1/index.ts b/owl-bot-staging/v1beta1/src/v1beta1/index.ts deleted file mode 100644 index dc3afed8..00000000 --- a/owl-bot-staging/v1beta1/src/v1beta1/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -export {BigQueryStorageClient} from './big_query_storage_client'; diff --git a/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.js deleted file mode 100644 index cc56b095..00000000 --- a/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.js +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -/* eslint-disable node/no-missing-require, no-unused-vars */ -const storage = require('@google-cloud/bigquery-storage'); - -function main() { - const bigQueryStorageClient = new storage.BigQueryStorageClient(); -} - -main(); diff --git a/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.ts deleted file mode 100644 index 6e4fec44..00000000 --- a/owl-bot-staging/v1beta1/system-test/fixtures/sample/src/index.ts +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {BigQueryStorageClient} from '@google-cloud/bigquery-storage'; - -// check that the client class type name can be used -function doStuffWithBigQueryStorageClient(client: BigQueryStorageClient) { - client.close(); -} - -function main() { - // check that the client instance can be created - const bigQueryStorageClient = new BigQueryStorageClient(); - doStuffWithBigQueryStorageClient(bigQueryStorageClient); -} - -main(); diff --git a/owl-bot-staging/v1beta1/system-test/install.ts b/owl-bot-staging/v1beta1/system-test/install.ts deleted file mode 100644 index 8ec45222..00000000 --- a/owl-bot-staging/v1beta1/system-test/install.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import { packNTest } from 'pack-n-play'; -import { readFileSync } from 'fs'; -import { describe, it } from 'mocha'; - -describe('📦 pack-n-play test', () => { - - it('TypeScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'TypeScript user can use the type definitions', - ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() - } - }; - await packNTest(options); - }); - - it('JavaScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'JavaScript user can use the library', - ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() - } - }; - await packNTest(options); - }); - -}); diff --git a/owl-bot-staging/v1beta1/test/gapic_big_query_storage_v1beta1.ts b/owl-bot-staging/v1beta1/test/gapic_big_query_storage_v1beta1.ts deleted file mode 100644 index 62d96796..00000000 --- a/owl-bot-staging/v1beta1/test/gapic_big_query_storage_v1beta1.ts +++ /dev/null @@ -1,781 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import { describe, it } from 'mocha'; -import * as bigquerystorageModule from '../src'; - -import {PassThrough} from 'stream'; - -import {protobuf} from 'google-gax'; - -function generateSampleMessage<T extends object>(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) { - return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); -} - -function stubServerStreamingCall<ResponseType>(response?: ResponseType, error?: Error) { - const transformStub = error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); - const mockStream = new PassThrough({ - objectMode: true, - transform: transformStub, - }); - // write something to the stream to trigger transformStub and send the response back to the client - setImmediate(() => { mockStream.write({}); }); - setImmediate(() => { mockStream.end(); }); - return sinon.stub().returns(mockStream); -} - -describe('v1beta1.BigQueryStorageClient', () => { - it('has servicePath', () => { - const servicePath = bigquerystorageModule.v1beta1.BigQueryStorageClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = bigquerystorageModule.v1beta1.BigQueryStorageClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = bigquerystorageModule.v1beta1.BigQueryStorageClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.bigQueryStorageStub, undefined); - await client.initialize(); - assert(client.bigQueryStorageStub); - }); - - it('has close method for the initialized client', done => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.bigQueryStorageStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.bigQueryStorageStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { -
const fakeProjectId = 'fake-project-id'; - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - - describe('createReadSession', () => { - it('invokes createReadSession without error', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest()); - request.tableReference = {}; - request.tableReference.projectId = ''; - request.tableReference = {}; - request.tableReference.datasetId = ''; - const expectedHeaderRequestParams = "table_reference.project_id=&table_reference.dataset_id="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadSession()); - client.innerApiCalls.createReadSession = stubSimpleCall(expectedResponse); - const [response] = await client.createReadSession(request); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.createReadSession as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes createReadSession without error using callback', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest()); - request.tableReference = {}; - request.tableReference.projectId = ''; - request.tableReference = {}; - request.tableReference.datasetId = ''; - const expectedHeaderRequestParams = "table_reference.project_id=&table_reference.dataset_id="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadSession()); - client.innerApiCalls.createReadSession = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.createReadSession( - request, - (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1beta1.IReadSession|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - 
assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.createReadSession as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes createReadSession with error', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest()); - request.tableReference = {}; - request.tableReference.projectId = ''; - request.tableReference = {}; - request.tableReference.datasetId = ''; - const expectedHeaderRequestParams = "table_reference.project_id=&table_reference.dataset_id="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedError = new Error('expected'); - client.innerApiCalls.createReadSession = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.createReadSession(request), expectedError); - assert((client.innerApiCalls.createReadSession as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes createReadSession with closed client', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest()); - request.tableReference = {}; - request.tableReference.projectId = ''; - request.tableReference = {}; - request.tableReference.datasetId = ''; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.createReadSession(request), expectedError); - }); - }); - - describe('batchCreateReadSessionStreams', () => { - it('invokes batchCreateReadSessionStreams without error', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest()); - request.session = {}; - request.session.name = ''; - const expectedHeaderRequestParams = "session.name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse()); - client.innerApiCalls.batchCreateReadSessionStreams = stubSimpleCall(expectedResponse); - const [response] = await client.batchCreateReadSessionStreams(request); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.batchCreateReadSessionStreams as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes batchCreateReadSessionStreams without error using callback', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new 
protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest()); - request.session = {}; - request.session.name = ''; - const expectedHeaderRequestParams = "session.name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse()); - client.innerApiCalls.batchCreateReadSessionStreams = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.batchCreateReadSessionStreams( - request, - (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.batchCreateReadSessionStreams as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes batchCreateReadSessionStreams with error', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest()); - request.session = {}; - request.session.name = ''; - const expectedHeaderRequestParams = "session.name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedError = new Error('expected'); - client.innerApiCalls.batchCreateReadSessionStreams = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.batchCreateReadSessionStreams(request), expectedError); - assert((client.innerApiCalls.batchCreateReadSessionStreams as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes batchCreateReadSessionStreams with closed client', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest()); - request.session = {}; - request.session.name = ''; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.batchCreateReadSessionStreams(request), expectedError); - }); - }); - - describe('finalizeStream', () => { - it('invokes finalizeStream without error', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest()); - request.stream = {}; - request.stream.name = ''; - const expectedHeaderRequestParams = "stream.name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.protobuf.Empty()); - client.innerApiCalls.finalizeStream = stubSimpleCall(expectedResponse); - 
const [response] = await client.finalizeStream(request); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.finalizeStream as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes finalizeStream without error using callback', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest()); - request.stream = {}; - request.stream.name = ''; - const expectedHeaderRequestParams = "stream.name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.protobuf.Empty()); - client.innerApiCalls.finalizeStream = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.finalizeStream( - request, - (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.finalizeStream as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes finalizeStream with error', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest()); - request.stream = {}; - request.stream.name = ''; - const expectedHeaderRequestParams = "stream.name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedError = new Error('expected'); - client.innerApiCalls.finalizeStream = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.finalizeStream(request), expectedError); - assert((client.innerApiCalls.finalizeStream as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes finalizeStream with closed client', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest()); - request.stream = {}; - request.stream.name = ''; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.finalizeStream(request), expectedError); - }); - }); - - describe('splitReadStream', () => { - it('invokes splitReadStream without error', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest()); - request.originalStream = {}; - request.originalStream.name = ''; - const expectedHeaderRequestParams = "original_stream.name="; - const 
expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse()); - client.innerApiCalls.splitReadStream = stubSimpleCall(expectedResponse); - const [response] = await client.splitReadStream(request); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.splitReadStream as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes splitReadStream without error using callback', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest()); - request.originalStream = {}; - request.originalStream.name = ''; - const expectedHeaderRequestParams = "original_stream.name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse()); - client.innerApiCalls.splitReadStream = stubSimpleCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.splitReadStream( - request, - (err?: Error|null, result?: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse|null) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.splitReadStream as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes splitReadStream with error', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest()); - request.originalStream = {}; - request.originalStream.name = ''; - const expectedHeaderRequestParams = "original_stream.name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedError = new Error('expected'); - client.innerApiCalls.splitReadStream = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.splitReadStream(request), expectedError); - assert((client.innerApiCalls.splitReadStream as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes splitReadStream with closed client', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest()); - request.originalStream = {}; - request.originalStream.name = ''; - const expectedError = new Error('The client has already been closed.'); - client.close(); - await assert.rejects(client.splitReadStream(request), expectedError); - }); - }); - - describe('readRows', () => { - it('invokes readRows 
without error', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadRowsRequest()); - request.readPosition = {}; - request.readPosition.stream = {}; - request.readPosition.stream.name = ''; - const expectedHeaderRequestParams = "read_position.stream.name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedResponse = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadRowsResponse()); - client.innerApiCalls.readRows = stubServerStreamingCall(expectedResponse); - const stream = client.readRows(request); - const promise = new Promise((resolve, reject) => { - stream.on('data', (response: protos.google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) => { - resolve(response); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.readRows as SinonStub) - .getCall(0).calledWith(request, expectedOptions)); - }); - - it('invokes readRows with error', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadRowsRequest()); - request.readPosition = {}; - request.readPosition.stream = {}; - request.readPosition.stream.name = ''; - const expectedHeaderRequestParams = "read_position.stream.name="; - const expectedOptions = { - otherArgs: { - headers: { - 'x-goog-request-params': expectedHeaderRequestParams, - }, - }, - }; - const expectedError = new Error('expected'); - client.innerApiCalls.readRows = stubServerStreamingCall(undefined, expectedError); - const stream = client.readRows(request); - const promise = new Promise((resolve, reject) => { - stream.on('data', (response: protos.google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) => { - resolve(response); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - await assert.rejects(promise, expectedError); - assert((client.innerApiCalls.readRows as SinonStub) - .getCall(0).calledWith(request, expectedOptions)); - }); - - it('invokes readRows with closed client', async () => { - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.bigquery.storage.v1beta1.ReadRowsRequest()); - request.readPosition = {}; - request.readPosition.stream = {}; - request.readPosition.stream.name = ''; - const expectedError = new Error('The client has already been closed.'); - client.close(); - const stream = client.readRows(request); - const promise = new Promise((resolve, reject) => { - stream.on('data', (response: protos.google.cloud.bigquery.storage.v1beta1.ReadRowsResponse) => { - resolve(response); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - }); - await assert.rejects(promise, expectedError); - }); - }); - - describe('Path templates', () => { - - describe('project', () => { - const fakePath = 
"/rendered/path/project"; - const expectedParameters = { - project: "projectValue", - }; - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.projectPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.projectPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('projectPath', () => { - const result = client.projectPath("projectValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.projectPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromProjectName', () => { - const result = client.matchProjectFromProjectName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.projectPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('readSession', () => { - const fakePath = "/rendered/path/readSession"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - session: "sessionValue", - }; - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.readSessionPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.readSessionPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('readSessionPath', () => { - const result = client.readSessionPath("projectValue", "locationValue", "sessionValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.readSessionPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - it('matchProjectFromReadSessionName', () => { - const result = client.matchProjectFromReadSessionName(fakePath); - assert.strictEqual(result, "projectValue"); - assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchLocationFromReadSessionName', () => { - const result = client.matchLocationFromReadSessionName(fakePath); - assert.strictEqual(result, "locationValue"); - assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - - it('matchSessionFromReadSessionName', () => { - const result = client.matchSessionFromReadSessionName(fakePath); - assert.strictEqual(result, "sessionValue"); - assert((client.pathTemplates.readSessionPathTemplate.match as SinonStub) - .getCall(-1).calledWith(fakePath)); - }); - }); - - describe('stream', () => { - const fakePath = "/rendered/path/stream"; - const expectedParameters = { - project: "projectValue", - location: "locationValue", - stream: "streamValue", - }; - const client = new bigquerystorageModule.v1beta1.BigQueryStorageClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - client.pathTemplates.streamPathTemplate.render = - sinon.stub().returns(fakePath); - client.pathTemplates.streamPathTemplate.match = - sinon.stub().returns(expectedParameters); - - it('streamPath', () => { - const result = client.streamPath("projectValue", "locationValue", "streamValue"); - assert.strictEqual(result, fakePath); - assert((client.pathTemplates.streamPathTemplate.render as SinonStub) - .getCall(-1).calledWith(expectedParameters)); - }); - - 
-            it('matchProjectFromStreamName', () => {
-                const result = client.matchProjectFromStreamName(fakePath);
-                assert.strictEqual(result, "projectValue");
-                assert((client.pathTemplates.streamPathTemplate.match as SinonStub)
-                    .getCall(-1).calledWith(fakePath));
-            });
-
-            it('matchLocationFromStreamName', () => {
-                const result = client.matchLocationFromStreamName(fakePath);
-                assert.strictEqual(result, "locationValue");
-                assert((client.pathTemplates.streamPathTemplate.match as SinonStub)
-                    .getCall(-1).calledWith(fakePath));
-            });
-
-            it('matchStreamFromStreamName', () => {
-                const result = client.matchStreamFromStreamName(fakePath);
-                assert.strictEqual(result, "streamValue");
-                assert((client.pathTemplates.streamPathTemplate.match as SinonStub)
-                    .getCall(-1).calledWith(fakePath));
-            });
-        });
-    });
-});
diff --git a/owl-bot-staging/v1beta1/tsconfig.json b/owl-bot-staging/v1beta1/tsconfig.json
deleted file mode 100644
index c78f1c88..00000000
--- a/owl-bot-staging/v1beta1/tsconfig.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "extends": "./node_modules/gts/tsconfig-google.json",
-  "compilerOptions": {
-    "rootDir": ".",
-    "outDir": "build",
-    "resolveJsonModule": true,
-    "lib": [
-      "es2018",
-      "dom"
-    ]
-  },
-  "include": [
-    "src/*.ts",
-    "src/**/*.ts",
-    "test/*.ts",
-    "test/**/*.ts",
-    "system-test/*.ts"
-  ]
-}
diff --git a/owl-bot-staging/v1beta1/webpack.config.js b/owl-bot-staging/v1beta1/webpack.config.js
deleted file mode 100644
index 5f047fa3..00000000
--- a/owl-bot-staging/v1beta1/webpack.config.js
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-const path = require('path');
-
-module.exports = {
-  entry: './src/index.ts',
-  output: {
-    library: 'BigQueryStorage',
-    filename: './big-query-storage.js',
-  },
-  node: {
-    child_process: 'empty',
-    fs: 'empty',
-    crypto: 'empty',
-  },
-  resolve: {
-    alias: {
-      '../../../package.json': path.resolve(__dirname, 'package.json'),
-    },
-    extensions: ['.js', '.json', '.ts'],
-  },
-  module: {
-    rules: [
-      {
-        test: /\.tsx?$/,
-        use: 'ts-loader',
-        exclude: /node_modules/
-      },
-      {
-        test: /node_modules[\\/]@grpc[\\/]grpc-js/,
-        use: 'null-loader'
-      },
-      {
-        test: /node_modules[\\/]grpc/,
-        use: 'null-loader'
-      },
-      {
-        test: /node_modules[\\/]retry-request/,
-        use: 'null-loader'
-      },
-      {
-        test: /node_modules[\\/]https?-proxy-agent/,
-        use: 'null-loader'
-      },
-      {
-        test: /node_modules[\\/]gtoken/,
-        use: 'null-loader'
-      },
-    ],
-  },
-  mode: 'production',
-};
diff --git a/src/v1/big_query_read_client.ts b/src/v1/big_query_read_client.ts
index a0246aef..525177ca 100644
--- a/src/v1/big_query_read_client.ts
+++ b/src/v1/big_query_read_client.ts
@@ -185,7 +185,8 @@ export class BigQueryReadClient {
     // Provide descriptors for these.
     this.descriptors.stream = {
       readRows: new this._gaxModule.StreamDescriptor(
-        gax.StreamType.SERVER_STREAMING
+        gax.StreamType.SERVER_STREAMING,
+        opts.fallback === 'rest'
       ),
     };
 
diff --git a/src/v1/big_query_write_client.ts b/src/v1/big_query_write_client.ts
index 1cb67db7..2ab96c5d 100644
--- a/src/v1/big_query_write_client.ts
+++ b/src/v1/big_query_write_client.ts
@@ -188,7 +188,8 @@ export class BigQueryWriteClient {
     // Provide descriptors for these.
     this.descriptors.stream = {
       appendRows: new this._gaxModule.StreamDescriptor(
-        gax.StreamType.BIDI_STREAMING
+        gax.StreamType.BIDI_STREAMING,
+        opts.fallback === 'rest'
       ),
     };
 
diff --git a/src/v1beta1/big_query_storage_client.ts b/src/v1beta1/big_query_storage_client.ts
index 342ef1a6..544d6d47 100644
--- a/src/v1beta1/big_query_storage_client.ts
+++ b/src/v1beta1/big_query_storage_client.ts
@@ -179,7 +179,8 @@ export class BigQueryStorageClient {
     // Provide descriptors for these.
     this.descriptors.stream = {
       readRows: new this._gaxModule.StreamDescriptor(
-        gax.StreamType.SERVER_STREAMING
+        gax.StreamType.SERVER_STREAMING,
+        opts.fallback === 'rest'
       ),
     };