diff --git a/.github/workflows/unmanaged_dependency_check.yaml b/.github/workflows/unmanaged_dependency_check.yaml
index 9e3961b938..add82ef961 100644
--- a/.github/workflows/unmanaged_dependency_check.yaml
+++ b/.github/workflows/unmanaged_dependency_check.yaml
@@ -14,6 +14,6 @@ jobs:
shell: bash
run: .kokoro/build.sh
- name: Unmanaged dependency check
- uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.28.1
+ uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.29.0
with:
bom-path: google-cloud-bigtable-bom/pom.xml
diff --git a/.kokoro/presubmit/graalvm-native-17.cfg b/.kokoro/presubmit/graalvm-native-17.cfg
index f97c4740aa..bb749969af 100644
--- a/.kokoro/presubmit/graalvm-native-17.cfg
+++ b/.kokoro/presubmit/graalvm-native-17.cfg
@@ -3,7 +3,7 @@
# Configure the docker image for kokoro-trampoline.
env_vars: {
key: "TRAMPOLINE_IMAGE"
- value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.28.1"
+ value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.29.0"
}
env_vars: {
diff --git a/.kokoro/presubmit/graalvm-native.cfg b/.kokoro/presubmit/graalvm-native.cfg
index 2e7bab018f..31a1a3ea63 100644
--- a/.kokoro/presubmit/graalvm-native.cfg
+++ b/.kokoro/presubmit/graalvm-native.cfg
@@ -3,7 +3,7 @@
# Configure the docker image for kokoro-trampoline.
env_vars: {
key: "TRAMPOLINE_IMAGE"
- value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.28.1"
+ value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.29.0"
}
env_vars: {
diff --git a/.readme-partials.yml b/.readme-partials.yml
index dc9a9e2377..c9386d33fb 100644
--- a/.readme-partials.yml
+++ b/.readme-partials.yml
@@ -115,7 +115,7 @@ custom_content: |
TIP: If you are experiencing version conflicts with gRPC, see [Version Conflicts](#version-conflicts).
- ## Enabling client side metrics
+ ## Client side metrics
Cloud Bigtable client supports publishing client side metrics to
[Cloud Monitoring](https://cloud.google.com/monitoring/docs/monitoring-overview) under the
@@ -124,6 +124,31 @@ custom_content: |
This feature is available once you upgrade to version 2.16.0 and above. Follow the guide on
https://cloud.google.com/bigtable/docs/client-side-metrics-setup to enable.
+ Since version 2.38.0, [client side metrics](https://cloud.google.com/bigtable/docs/client-side-metrics)
+ is enabled by default. This feature collects useful telemetry data in the client, and we recommend
+ using it in conjunction with server-side metrics to get a complete, actionable view of your Bigtable
+ performance. There is no additional cost to publish and view client-side metrics
+ in Cloud Monitoring.
+
+ ### Opt out of client side metrics
+
+ You can opt out of client side metrics with the following settings:
+
+ ```java
+ BigtableDataSettings settings = BigtableDataSettings.newBuilder()
+ .setProjectId("my-project")
+ .setInstanceId("my-instance")
+ .setMetricsProvider(NoopMetricsProvider.INSTANCE)
+ .build();
+ ```
+
+ ### Use a custom OpenTelemetry instance
+
+ If your application already has OpenTelemetry integration, you can register client side metrics on
+ your OpenTelemetry instance. You can refer to
+ [CustomOpenTelemetryMetricsProvider](https://github.com/googleapis/java-bigtable/blob/main/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java)
+ on how to set it up.
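For illustration, a minimal sketch of that wiring might look like the following. It assumes the `CustomOpenTelemetryMetricsProvider.create(OpenTelemetry)` factory on the class linked above, and it leaves the choice of metric reader and exporter to whatever your application already configures:

```java
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import io.opentelemetry.sdk.OpenTelemetrySdk;
import io.opentelemetry.sdk.metrics.SdkMeterProvider;

// Build (or reuse) the OpenTelemetry SDK instance your application already owns.
SdkMeterProvider meterProvider =
    SdkMeterProvider.builder()
        // register the metric reader/exporter your application already uses here
        .build();
OpenTelemetrySdk openTelemetry =
    OpenTelemetrySdk.builder().setMeterProvider(meterProvider).build();

// Route Bigtable client side metrics through that instance instead of the default setup.
BigtableDataSettings settings =
    BigtableDataSettings.newBuilder()
        .setProjectId("my-project")
        .setInstanceId("my-instance")
        .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry))
        .build();
```

Whatever reader or exporter is registered on that SdkMeterProvider will then receive the Bigtable client metrics alongside your application's own metrics.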
+
## Client request tracing: OpenCensus Tracing
Cloud Bigtable client supports [OpenCensus Tracing](https://opencensus.io/tracing/),
@@ -138,13 +163,13 @@ custom_content: |
io.opencensus opencensus-impl
- 0.24.0
+ 0.31.1
runtime
io.opencensus opencensus-exporter-trace-stackdriver
- 0.24.0
+ 0.31.1
io.grpc
@@ -197,140 +222,46 @@ custom_content: |
);
```
- ## Enabling Cloud Bigtable Metrics: OpenCensus Stats
-
- ---
- Note: We recommend [enabling client side built-in metrics](#enabling-client-side-metrics)
- if you want to view your metrics on cloud monitoring. This integration is only for exporting the
- metrics to a third party dashboard.
- ---
-
- Cloud Bigtable client supports [Opencensus Metrics](https://opencensus.io/stats/),
- which gives insight into the client internals and aids in debugging production issues.
- All Cloud Bigtable Metrics are prefixed with `cloud.google.com/java/bigtable/`. The
- metrics will be tagged with:
- * `bigtable_project_id`: the project that contains the target Bigtable instance.
- Please note that this id could be different from project that the client is running
- in and different from the project where the metrics are exported to.
- * `bigtable_instance_id`: the instance id of the target Bigtable instance
- * `bigtable_app_profile_id`: the app profile id that is being used to access the target
- Bigtable instance
-
- ### Available operation level metric views:
-
- * `cloud.google.com/java/bigtable/op_latency`: A distribution of latency of
- each client method call, across all of it's RPC attempts. Tagged by
- operation name and final response status.
-
- * `cloud.google.com/java/bigtable/completed_ops`: The total count of
- method invocations. Tagged by operation name and final response status.
-
- * `cloud.google.com/java/bigtable/read_rows_first_row_latency`: A
- distribution of the latency of receiving the first row in a ReadRows
- operation.
-
- * `cloud.google.com/java/bigtable/attempt_latency`: A distribution of latency of
- each client RPC, tagged by operation name and the attempt status. Under normal
- circumstances, this will be identical to op_latency. However, when the client
- receives transient errors, op_latency will be the sum of all attempt_latencies
- and the exponential delays.
+ ### Disable Bigtable traces
- * `cloud.google.com/java/bigtable/attempts_per_op`: A distribution of attempts that
- each operation required, tagged by operation name and final operation status.
- Under normal circumstances, this will be 1.
-
- #### GFE metric views:
- * `cloud.google.com/java/bigtable/gfe_latency`: A distribution of the latency
- between Google's network receives an RPC and reads back the first byte of
- the response.
-
- * `cloud.google.com/java/bigtable/gfe_header_missing_count`: A counter of the
- number of RPC responses received without the server-timing header, which
- indicates that the request probably never reached Google's network.
-
- By default, the functionality is disabled.
For example to enable metrics using - [Google Stackdriver](https://cloud.google.com/monitoring/docs/): - - - [//]: # (TODO: figure out how to keep opencensus version in sync with pom.xml) - - If you are using Maven, add this to your pom.xml file - ```xml - - io.opencensus - opencensus-impl - 0.24.0 - runtime - - - io.opencensus - opencensus-exporter-stats-stackdriver - 0.24.0 - - - io.grpc - * - - - com.google.auth - * - - - - ``` - If you are using Gradle, add this to your dependencies - ```Groovy - compile 'io.opencensus:opencensus-impl:0.24.0' - compile 'io.opencensus:opencensus-exporter-stats-stackdriver:0.24.0' - ``` - If you are using SBT, add this to your dependencies - ```Scala - libraryDependencies += "io.opencensus" % "opencensus-impl" % "0.24.0" - libraryDependencies += "io.opencensus" % "opencensus-exporter-stats-stackdriver" % "0.24.0" - ``` - - At the start of your application configure the exporter and enable the Bigtable stats views: + If your application already has OpenCensus Tracing integration and you want to disable Bigtable + traces, you can do the following: ```java - import io.opencensus.exporter.stats.stackdriver.StackdriverStatsConfiguration; - import io.opencensus.exporter.stats.stackdriver.StackdriverStatsExporter; - - StackdriverStatsExporter.createAndRegister( - StackdriverStatsConfiguration.builder() - .setProjectId("YOUR_PROJECT_ID") - .build() - ); - - BigtableDataSettings.enableOpenCensusStats(); - // Enable GFE metric views - BigtableDataSettings.enableGfeOpenCensusStats(); + public static class MySampler extends Sampler { + + private final Sampler childSampler; + + MySampler(Sampler child) { + this.childSampler = child; + } + + @Override + public boolean shouldSample(@Nullable SpanContext parentContext, + @Nullable Boolean hasRemoteParent, + TraceId traceId, + SpanId spanId, + String name, + List parentLinks) { + if (name.contains("Bigtable")) { + return false; + } + return childSampler.shouldSample(parentContext, hasRemoteParent, traceId, spanId, name, parentLinks); + } + + @Override + public String getDescription() { + return "from my sampler"; + } + } ``` - You can view the metrics on the Google Cloud Platform Console - [Metrics explorer](https://console.cloud.google.com/monitoring/metrics-explorer) - page. - - You can configure how frequently metrics are pushed to StackDriver and the - [Monitored resource type](https://cloud.google.com/monitoring/api/resources) by - updating `StackdriverStatsConfiguration`: - - ``` java - // Example: configuring export interval and monitored resource type - StackdriverStatsExporter.createAndRegister( - StackdriverStatsConfiguration.builder() - .setProjectId("YOUR_PROJECT_ID") - // Exporting metrics every 10 seconds - .setExportInterval(Duration.create(10, 0)) - // Configure monitored resource type. A common practice is to use the - // monitored resource objects that represent the physical resources - // where your application code is running. 
See the full list of - // monitored resource type here: - // https://cloud.google.com/monitoring/api/resources - .setMonitoredResource(MonitoredResource.newBuilder() - .setType("global") - .putLabels("project_id", "YOUR_PROJECT_ID") - .build()) - .build() + And use this sampler in your trace config: + ```java + Tracing.getTraceConfig().updateActiveTraceParams( + Tracing.getTraceConfig().getActiveTraceParams().toBuilder() + .setSampler(new MySampler(Samplers.probabilitySampler(0.1))) + .build() ); ``` diff --git a/.repo-metadata.json b/.repo-metadata.json index 8724fd67dc..aa4b44f8ff 100644 --- a/.repo-metadata.json +++ b/.repo-metadata.json @@ -13,5 +13,6 @@ "api_id": "bigtable.googleapis.com", "library_type": "GAPIC_COMBO", "extra_versioned_modules": "google-cloud-bigtable-emulator,google-cloud-bigtable-emulator-core", - "excluded_poms": "google-cloud-bigtable-bom" + "excluded_poms": "google-cloud-bigtable-bom", + "recommended_package": "com.google.cloud.bigtable" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d8d587a3af..1847a8898a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## [2.38.0](https://github.com/googleapis/java-bigtable/compare/v2.37.0...v2.38.0) (2024-04-15) + + +### Features + +* Add Data Boost configurations to admin API ([f29c5bb](https://github.com/googleapis/java-bigtable/commit/f29c5bba08daffe2721454db1714f6ea6f47fc66)) +* Add feature flag for client side metrics ([#2179](https://github.com/googleapis/java-bigtable/issues/2179)) ([f29c5bb](https://github.com/googleapis/java-bigtable/commit/f29c5bba08daffe2721454db1714f6ea6f47fc66)) +* Migrate to OTEL and enable metrics by default ([#2166](https://github.com/googleapis/java-bigtable/issues/2166)) ([1682939](https://github.com/googleapis/java-bigtable/commit/168293937cc7f438a3ec2dee46805aa8e12089c4)) + + +### Bug Fixes + +* Add more error handling ([#2203](https://github.com/googleapis/java-bigtable/issues/2203)) ([c2a63f7](https://github.com/googleapis/java-bigtable/commit/c2a63f7627f2aa6e2e51ec3e185abfa5234ad3e4)) +* Fix export to log detect resource errors ([#2197](https://github.com/googleapis/java-bigtable/issues/2197)) ([d32fbb7](https://github.com/googleapis/java-bigtable/commit/d32fbb78bbde2ad04103ab7b2c1176a6df72d0a3)) + ## [2.37.0](https://github.com/googleapis/java-bigtable/compare/v2.36.0...v2.37.0) (2024-03-27) diff --git a/README.md b/README.md index 2be1dd732f..72121d40b5 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file: com.google.cloud libraries-bom - 26.25.0 + 26.37.0 pom import @@ -42,7 +42,7 @@ If you are using Maven without the BOM, add this to your dependencies: com.google.cloud google-cloud-bigtable - 2.36.0 + 2.38.0 ``` @@ -215,7 +215,7 @@ try { TIP: If you are experiencing version conflicts with gRPC, see [Version Conflicts](#version-conflicts). -## Enabling client side metrics +## Client side metrics Cloud Bigtable client supports publishing client side metrics to [Cloud Monitoring](https://cloud.google.com/monitoring/docs/monitoring-overview) under the @@ -224,6 +224,31 @@ Cloud Bigtable client supports publishing client side metrics to This feature is available once you upgrade to version 2.16.0 and above. Follow the guide on https://cloud.google.com/bigtable/docs/client-side-metrics-setup to enable. +Since version 2.38.0, [client side metrics](https://cloud.google.com/bigtable/docs/client-side-metrics) +is enabled by default. 
This feature collects useful telemetry data in the client, and we recommend using it
+in conjunction with server-side metrics to get a complete, actionable view of your Bigtable
+performance. There is no additional cost to publish and view client-side metrics
+in Cloud Monitoring.
+
+### Opt out of client side metrics
+
+You can opt out of client side metrics with the following settings:
+
+```java
+BigtableDataSettings settings = BigtableDataSettings.newBuilder()
+ .setProjectId("my-project")
+ .setInstanceId("my-instance")
+ .setMetricsProvider(NoopMetricsProvider.INSTANCE)
+ .build();
+```
+
+### Use a custom OpenTelemetry instance
+
+If your application already has OpenTelemetry integration, you can register client side metrics on
+your OpenTelemetry instance. You can refer to
+[CustomOpenTelemetryMetricsProvider](https://github.com/googleapis/java-bigtable/blob/main/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java)
+on how to set it up.
+
## Client request tracing: OpenCensus Tracing
Cloud Bigtable client supports [OpenCensus Tracing](https://opencensus.io/tracing/),
@@ -238,13 +263,13 @@ If you are using Maven, add this to your pom.xml file
io.opencensus opencensus-impl
- 0.24.0
+ 0.31.1
runtime
io.opencensus opencensus-exporter-trace-stackdriver
- 0.24.0
+ 0.31.1
io.grpc
@@ -297,140 +322,46 @@ Tracing.getTraceConfig().updateActiveTraceParams(
);
```
-## Enabling Cloud Bigtable Metrics: OpenCensus Stats
-
----
-Note: We recommend [enabling client side built-in metrics](#enabling-client-side-metrics)
-if you want to view your metrics on cloud monitoring. This integration is only for exporting the
-metrics to a third party dashboard.
----
-
-Cloud Bigtable client supports [Opencensus Metrics](https://opencensus.io/stats/),
-which gives insight into the client internals and aids in debugging production issues.
-All Cloud Bigtable Metrics are prefixed with `cloud.google.com/java/bigtable/`. The
-metrics will be tagged with:
- * `bigtable_project_id`: the project that contains the target Bigtable instance.
- Please note that this id could be different from project that the client is running
- in and different from the project where the metrics are exported to.
-* `bigtable_instance_id`: the instance id of the target Bigtable instance
-* `bigtable_app_profile_id`: the app profile id that is being used to access the target
- Bigtable instance
-
-### Available operation level metric views:
-
-* `cloud.google.com/java/bigtable/op_latency`: A distribution of latency of
- each client method call, across all of it's RPC attempts. Tagged by
- operation name and final response status.
-
-* `cloud.google.com/java/bigtable/completed_ops`: The total count of
- method invocations. Tagged by operation name and final response status.
-
-* `cloud.google.com/java/bigtable/read_rows_first_row_latency`: A
- distribution of the latency of receiving the first row in a ReadRows
- operation.
-
-* `cloud.google.com/java/bigtable/attempt_latency`: A distribution of latency of
- each client RPC, tagged by operation name and the attempt status. Under normal
- circumstances, this will be identical to op_latency. However, when the client
- receives transient errors, op_latency will be the sum of all attempt_latencies
- and the exponential delays.
+### Disable Bigtable traces
-* `cloud.google.com/java/bigtable/attempts_per_op`: A distribution of attempts that
- each operation required, tagged by operation name and final operation status.
- Under normal circumstances, this will be 1. - -#### GFE metric views: -* `cloud.google.com/java/bigtable/gfe_latency`: A distribution of the latency -between Google's network receives an RPC and reads back the first byte of -the response. - -* `cloud.google.com/java/bigtable/gfe_header_missing_count`: A counter of the -number of RPC responses received without the server-timing header, which -indicates that the request probably never reached Google's network. - -By default, the functionality is disabled. For example to enable metrics using -[Google Stackdriver](https://cloud.google.com/monitoring/docs/): - - -[//]: # (TODO: figure out how to keep opencensus version in sync with pom.xml) - -If you are using Maven, add this to your pom.xml file -```xml - - io.opencensus - opencensus-impl - 0.24.0 - runtime - - - io.opencensus - opencensus-exporter-stats-stackdriver - 0.24.0 - - - io.grpc - * - - - com.google.auth - * - - - -``` -If you are using Gradle, add this to your dependencies -```Groovy -compile 'io.opencensus:opencensus-impl:0.24.0' -compile 'io.opencensus:opencensus-exporter-stats-stackdriver:0.24.0' -``` -If you are using SBT, add this to your dependencies -```Scala -libraryDependencies += "io.opencensus" % "opencensus-impl" % "0.24.0" -libraryDependencies += "io.opencensus" % "opencensus-exporter-stats-stackdriver" % "0.24.0" -``` - -At the start of your application configure the exporter and enable the Bigtable stats views: +If your application already has OpenCensus Tracing integration and you want to disable Bigtable +traces, you can do the following: ```java -import io.opencensus.exporter.stats.stackdriver.StackdriverStatsConfiguration; -import io.opencensus.exporter.stats.stackdriver.StackdriverStatsExporter; - -StackdriverStatsExporter.createAndRegister( - StackdriverStatsConfiguration.builder() - .setProjectId("YOUR_PROJECT_ID") - .build() -); - -BigtableDataSettings.enableOpenCensusStats(); -// Enable GFE metric views -BigtableDataSettings.enableGfeOpenCensusStats(); +public static class MySampler extends Sampler { + + private final Sampler childSampler; + + MySampler(Sampler child) { + this.childSampler = child; + } + + @Override + public boolean shouldSample(@Nullable SpanContext parentContext, + @Nullable Boolean hasRemoteParent, + TraceId traceId, + SpanId spanId, + String name, + List parentLinks) { + if (name.contains("Bigtable")) { + return false; + } + return childSampler.shouldSample(parentContext, hasRemoteParent, traceId, spanId, name, parentLinks); + } + + @Override + public String getDescription() { + return "from my sampler"; + } +} ``` -You can view the metrics on the Google Cloud Platform Console -[Metrics explorer](https://console.cloud.google.com/monitoring/metrics-explorer) -page. - -You can configure how frequently metrics are pushed to StackDriver and the -[Monitored resource type](https://cloud.google.com/monitoring/api/resources) by -updating `StackdriverStatsConfiguration`: - -``` java -// Example: configuring export interval and monitored resource type -StackdriverStatsExporter.createAndRegister( - StackdriverStatsConfiguration.builder() - .setProjectId("YOUR_PROJECT_ID") - // Exporting metrics every 10 seconds - .setExportInterval(Duration.create(10, 0)) - // Configure monitored resource type. A common practice is to use the - // monitored resource objects that represent the physical resources - // where your application code is running. 
See the full list of - // monitored resource type here: - // https://cloud.google.com/monitoring/api/resources - .setMonitoredResource(MonitoredResource.newBuilder() - .setType("global") - .putLabels("project_id", "YOUR_PROJECT_ID") - .build()) - .build() +And use this sampler in your trace config: +```java +Tracing.getTraceConfig().updateActiveTraceParams( + Tracing.getTraceConfig().getActiveTraceParams().toBuilder() + .setSampler(new MySampler(Samplers.probabilitySampler(0.1))) + .build() ); ``` @@ -490,6 +421,7 @@ Samples are in the [`samples/`](https://github.com/googleapis/java-bigtable/tree | Sample | Source Code | Try it | | --------------------------- | --------------------------------- | ------ | | Native Image Bigtable Sample | [source code](https://github.com/googleapis/java-bigtable/blob/main/samples/native-image-sample/src/main/java/com/example/bigtable/NativeImageBigtableSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigtable&page=editor&open_in_editor=samples/native-image-sample/src/main/java/com/example/bigtable/NativeImageBigtableSample.java) | +| Authorized View Example | [source code](https://github.com/googleapis/java-bigtable/blob/main/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigtable&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java) | | Configure Connection Pool | [source code](https://github.com/googleapis/java-bigtable/blob/main/samples/snippets/src/main/java/com/example/bigtable/ConfigureConnectionPool.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigtable&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigtable/ConfigureConnectionPool.java) | | Filters | [source code](https://github.com/googleapis/java-bigtable/blob/main/samples/snippets/src/main/java/com/example/bigtable/Filters.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigtable&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigtable/Filters.java) | | Hello World | [source code](https://github.com/googleapis/java-bigtable/blob/main/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigtable&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java) | diff --git a/google-cloud-bigtable-bom/pom.xml b/google-cloud-bigtable-bom/pom.xml index fc843f7c03..2ed26699c7 100644 --- a/google-cloud-bigtable-bom/pom.xml +++ b/google-cloud-bigtable-bom/pom.xml @@ -3,12 +3,12 @@ 4.0.0 com.google.cloud google-cloud-bigtable-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom com.google.cloud sdk-platform-java-config - 3.28.1 + 3.29.0 @@ -63,42 +63,42 @@ com.google.cloud google-cloud-bigtable - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT com.google.cloud google-cloud-bigtable-emulator - 0.174.1-SNAPSHOT + 0.175.1-SNAPSHOT com.google.cloud google-cloud-bigtable-emulator-core - 0.174.1-SNAPSHOT + 0.175.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-bigtable-admin-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT com.google.api.grpc 
grpc-google-cloud-bigtable-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT com.google.api.grpc proto-google-cloud-bigtable-admin-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT com.google.api.grpc proto-google-cloud-bigtable-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT com.google.cloud google-cloud-bigtable-stats - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT diff --git a/google-cloud-bigtable-deps-bom/pom.xml b/google-cloud-bigtable-deps-bom/pom.xml index f07753fe60..f605d675e1 100644 --- a/google-cloud-bigtable-deps-bom/pom.xml +++ b/google-cloud-bigtable-deps-bom/pom.xml @@ -7,13 +7,13 @@ com.google.cloud sdk-platform-java-config - 3.28.1 + 3.29.0 com.google.cloud google-cloud-bigtable-deps-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom @@ -66,7 +66,7 @@ com.google.cloud gapic-libraries-bom - 1.32.0 + 1.35.0 pom import diff --git a/google-cloud-bigtable-emulator-core/pom.xml b/google-cloud-bigtable-emulator-core/pom.xml index 8148e52126..d9b2e5da22 100644 --- a/google-cloud-bigtable-emulator-core/pom.xml +++ b/google-cloud-bigtable-emulator-core/pom.xml @@ -7,11 +7,11 @@ google-cloud-bigtable-parent com.google.cloud - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT google-cloud-bigtable-emulator-core - 0.174.1-SNAPSHOT + 0.175.1-SNAPSHOT A Java wrapper for the Cloud Bigtable emulator. diff --git a/google-cloud-bigtable-emulator/pom.xml b/google-cloud-bigtable-emulator/pom.xml index a6a4f39567..bd2e0bcf4e 100644 --- a/google-cloud-bigtable-emulator/pom.xml +++ b/google-cloud-bigtable-emulator/pom.xml @@ -5,7 +5,7 @@ 4.0.0 google-cloud-bigtable-emulator - 0.174.1-SNAPSHOT + 0.175.1-SNAPSHOT Google Cloud Java - Bigtable Emulator https://github.com/googleapis/java-bigtable @@ -14,7 +14,7 @@ com.google.cloud google-cloud-bigtable-parent - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT scm:git:git@github.com:googleapis/java-bigtable.git @@ -81,14 +81,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import com.google.cloud google-cloud-bigtable-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import @@ -99,7 +99,7 @@ com.google.cloud google-cloud-bigtable-emulator-core - 0.174.1-SNAPSHOT + 0.175.1-SNAPSHOT diff --git a/google-cloud-bigtable-stats/clirr-ignored-differences.xml b/google-cloud-bigtable-stats/clirr-ignored-differences.xml deleted file mode 100644 index aa9be424a8..0000000000 --- a/google-cloud-bigtable-stats/clirr-ignored-differences.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - 7005 - com/google/cloud/bigtable/stats/StatsRecorderWrapper - *StatsRecorderWrapper* - *StatsRecorder* - - - - 7002 - com/google/cloud/bigtable/stats/StatsRecorderWrapper - void record(java.lang.String, java.lang.String, java.lang.String, java.lang.String) - - - - 7002 - com/google/cloud/bigtable/stats/StatsRecorderWrapper - void putBatchRequestThrottled(long) - - - - 7005 - com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection - *StatsRecorderWrapperForConnection* - * - - - - 7002 - com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils$ResourceUtilsWrapper - * - - - - 7006 - com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils$ResourceUtilsWrapper - * - * - - diff --git a/google-cloud-bigtable-stats/pom.xml b/google-cloud-bigtable-stats/pom.xml deleted file mode 100644 index 92d3e9fa8e..0000000000 --- a/google-cloud-bigtable-stats/pom.xml +++ /dev/null @@ -1,269 +0,0 @@ - - - - com.google.cloud - google-cloud-bigtable-parent - 2.37.1-SNAPSHOT - - 4.0.0 - - - google-cloud-bigtable-stats - 2.37.1-SNAPSHOT - Experimental project to shade OpenCensus dependencies. 
- - - - - com.google.cloud - google-cloud-bigtable-deps-bom - 2.37.1-SNAPSHOT - pom - import - - - - - - - - io.opencensus - opencensus-api - - - io.opencensus - opencensus-exporter-stats-stackdriver - - - io.opencensus - opencensus-contrib-resource-util - - - io.opencensus - opencensus-impl - runtime - - - - - com.google.cloud - google-cloud-monitoring - - - - com.google.http-client - google-http-client-gson - - - com.google.http-client - google-http-client - - - - io.perfmark - perfmark-api - - - - - com.google.api.grpc - proto-google-cloud-monitoring-v3 - - - com.google.api.grpc - proto-google-common-protos - - - com.google.auth - google-auth-library-credentials - - - com.google.api - gax - - - - com.google.http-client - google-http-client - - - com.google.http-client - google-http-client-gson - - - - - com.google.api - api-common - - - com.google.api - gax-grpc - - - com.google.protobuf - protobuf-java - - - com.google.guava - guava - - - org.threeten - threetenbp - - - com.google.code.findbugs - jsr305 - - - - - com.google.http-client - google-http-client - runtime - - - - - com.google.truth - truth - test - - - junit - junit - test - - - org.mockito - mockito-core - test - - - - - - - org.apache.maven.plugins - maven-shade-plugin - 3.2.4 - - - package - - shade - - - false - true - - - - io.opencensus:* - - - - - io.opencensus - - com.google.bigtable.veneer.repackaged.io.opencensus - - - - - - - - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - 3.6.1 - - - - - - - - io.opencensus:opencensus-exporter-metrics-util:* - io.opencensus:opencensus-exporter-stats-stackdriver:* - - - - - org.codehaus.mojo - clirr-maven-plugin - - - com/google/bigtable/veneer/repackaged/** - - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - enforce-version-consistency - - enforce - - - - - - - - - io.opencensus:*:[0.31.1] - io.opencensus:opencensus-proto:[0.2.0] - - - - - - - - - org.codehaus.mojo - license-maven-plugin - 2.4.0 - - - default-cli - generate-resources - - add-third-party - - - test - - io.opencensus:* - true - - - - - - - - - test - - - - - - diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporter.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporter.java deleted file mode 100644 index d8936b0e0e..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporter.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import com.google.api.MonitoredResource; -import com.google.cloud.monitoring.v3.MetricServiceClient; -import com.google.monitoring.v3.CreateTimeSeriesRequest; -import com.google.monitoring.v3.ProjectName; -import io.opencensus.exporter.metrics.util.MetricExporter; -import io.opencensus.metrics.export.Metric; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.stream.Collectors; - -final class BigtableCreateTimeSeriesExporter extends MetricExporter { - private static final Logger logger = - Logger.getLogger(BigtableCreateTimeSeriesExporter.class.getName()); - private final MetricServiceClient metricServiceClient; - private final MonitoredResource gceOrGkeMonitoredResource; - private final String clientId; - - BigtableCreateTimeSeriesExporter( - MetricServiceClient metricServiceClient, MonitoredResource gceOrGkeMonitoredResource) { - this.metricServiceClient = metricServiceClient; - this.clientId = BigtableStackdriverExportUtils.getDefaultTaskValue(); - this.gceOrGkeMonitoredResource = gceOrGkeMonitoredResource; - } - - public void export(Collection metrics) { - Map> projectToTimeSeries = new HashMap<>(); - - for (Metric metric : metrics) { - // only export bigtable metrics - if (!BigtableStackdriverExportUtils.shouldExportMetric(metric.getMetricDescriptor())) { - continue; - } - - projectToTimeSeries = - metric.getTimeSeriesList().stream() - .collect( - Collectors.groupingBy( - timeSeries -> - BigtableStackdriverExportUtils.getProjectId( - metric.getMetricDescriptor(), timeSeries, gceOrGkeMonitoredResource), - Collectors.mapping( - timeSeries -> - BigtableStackdriverExportUtils.convertTimeSeries( - metric.getMetricDescriptor(), - timeSeries, - clientId, - gceOrGkeMonitoredResource), - Collectors.toList()))); - - for (Map.Entry> entry : - projectToTimeSeries.entrySet()) { - ProjectName projectName = ProjectName.of(entry.getKey()); - CreateTimeSeriesRequest request = - CreateTimeSeriesRequest.newBuilder() - .setName(projectName.toString()) - .addAllTimeSeries(entry.getValue()) - .build(); - try { - this.metricServiceClient.createServiceTimeSeries(request); - } catch (Throwable e) { - logger.log( - Level.WARNING, - "Exception thrown when exporting TimeSeries for projectName=" - + projectName.getProject(), - e); - } - } - } - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverExportUtils.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverExportUtils.java deleted file mode 100644 index cc70fbb435..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverExportUtils.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import static com.google.cloud.bigtable.stats.BuiltinViewConstants.PER_CONNECTION_ERROR_COUNT_VIEW; - -import com.google.api.Distribution.BucketOptions; -import com.google.api.Distribution.BucketOptions.Explicit; -import com.google.api.Metric; -import com.google.api.MetricDescriptor.MetricKind; -import com.google.api.MonitoredResource; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Maps; -import com.google.monitoring.v3.TimeInterval; -import com.google.monitoring.v3.TypedValue; -import io.opencensus.common.Function; -import io.opencensus.common.Functions; -import io.opencensus.common.Timestamp; -import io.opencensus.metrics.LabelKey; -import io.opencensus.metrics.LabelValue; -import io.opencensus.metrics.export.Distribution; -import io.opencensus.metrics.export.Distribution.Bucket; -import io.opencensus.metrics.export.Distribution.BucketOptions.ExplicitOptions; -import io.opencensus.metrics.export.MetricDescriptor; -import io.opencensus.metrics.export.MetricDescriptor.Type; -import io.opencensus.metrics.export.Point; -import io.opencensus.metrics.export.Summary; -import io.opencensus.metrics.export.TimeSeries; -import io.opencensus.metrics.export.Value; -import java.lang.management.ManagementFactory; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.security.SecureRandom; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.annotation.Nullable; - -class BigtableStackdriverExportUtils { - private static final String BIGTABLE_RESOURCE_TYPE = "bigtable_client_raw"; - - @VisibleForTesting static final String GCE_RESOURCE_TYPE = "gce_instance"; - @VisibleForTesting static final String GKE_RESOURCE_TYPE = "k8s_container"; - @VisibleForTesting static final String GCE_OR_GKE_PROJECT_ID_KEY = "project_id"; - private static final Logger logger = - Logger.getLogger(BigtableStackdriverExportUtils.class.getName()); - - private static final Function typedValueDoubleFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - builder.setDoubleValue(arg); - return builder.build(); - }; - private static final Function typedValueLongFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - builder.setInt64Value(arg); - return builder.build(); - }; - private static final Function typedValueDistributionFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - return builder - .setDistributionValue(BigtableStackdriverExportUtils.createDistribution(arg)) - .build(); - }; - private static final Function typedValueSummaryFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - return builder.build(); - }; - private static final Function bucketOptionsExplicitFunction = - arg -> { - BucketOptions.Builder builder = BucketOptions.newBuilder(); - Explicit.Builder explicitBuilder = Explicit.newBuilder(); - explicitBuilder.addBounds(0.0D); - explicitBuilder.addAllBounds(arg.getBucketBoundaries()); - builder.setExplicitBuckets(explicitBuilder.build()); - return builder.build(); - }; - - // promote the following metric labels to Bigtable monitored resource labels - private static final Set PROMOTED_BIGTABLE_RESOURCE_LABELS = - ImmutableSet.of( - BuiltinMeasureConstants.PROJECT_ID.getName(), - 
BuiltinMeasureConstants.INSTANCE_ID.getName(), - BuiltinMeasureConstants.CLUSTER.getName(), - BuiltinMeasureConstants.ZONE.getName(), - BuiltinMeasureConstants.TABLE.getName()); - - private static final LabelKey CLIENT_UID_LABEL_KEY = - LabelKey.create(BuiltinMeasureConstants.CLIENT_UID.getName(), "client uid"); - - static boolean isBigtableTableMetric(MetricDescriptor metricDescriptor) { - return metricDescriptor.getName().contains("bigtable") - && !metricDescriptor.getName().equals(PER_CONNECTION_ERROR_COUNT_VIEW.getName().asString()); - } - - static boolean shouldExportMetric(MetricDescriptor metricDescriptor) { - return isBigtableTableMetric(metricDescriptor) - || (metricDescriptor.getName().equals(PER_CONNECTION_ERROR_COUNT_VIEW.getName().asString()) - && (ConsumerEnvironmentUtils.isEnvGce() || ConsumerEnvironmentUtils.isEnvGke())); - } - - static com.google.monitoring.v3.TimeSeries convertTimeSeries( - MetricDescriptor metricDescriptor, - TimeSeries timeSeries, - String clientId, - MonitoredResource gceOrGkeMonitoredResource) { - Type metricType = metricDescriptor.getType(); - - com.google.monitoring.v3.TimeSeries.Builder builder; - if (isBigtableTableMetric(metricDescriptor)) { - builder = - setupBuilderForBigtableResource( - metricDescriptor, - MonitoredResource.newBuilder().setType(BIGTABLE_RESOURCE_TYPE), - timeSeries, - clientId); - } else if (ConsumerEnvironmentUtils.isEnvGce() || ConsumerEnvironmentUtils.isEnvGke()) { - builder = - setupBuilderForGceOrGKEResource( - metricDescriptor, gceOrGkeMonitoredResource, timeSeries, clientId); - } else { - logger.warning( - "Trying to export metric " - + metricDescriptor.getName() - + " in a non-GCE/GKE environment."); - return com.google.monitoring.v3.TimeSeries.newBuilder().build(); - } - builder.setMetricKind(createMetricKind(metricType)); - builder.setValueType(createValueType(metricType)); - Timestamp startTimeStamp = timeSeries.getStartTimestamp(); - for (Point point : timeSeries.getPoints()) { - builder.addPoints(createPoint(point, startTimeStamp)); - } - return builder.build(); - } - - private static com.google.monitoring.v3.TimeSeries.Builder setupBuilderForBigtableResource( - MetricDescriptor metricDescriptor, - MonitoredResource.Builder monitoredResourceBuilder, - TimeSeries timeSeries, - String clientId) { - List labelKeys = metricDescriptor.getLabelKeys(); - String metricName = metricDescriptor.getName(); - List metricTagKeys = new ArrayList<>(); - List metricTagValues = new ArrayList<>(); - - List labelValues = timeSeries.getLabelValues(); - for (int i = 0; i < labelValues.size(); i++) { - // If the label is defined in the monitored resource, convert it to - // a monitored resource label. Otherwise, keep it as a metric label. 
- if (PROMOTED_BIGTABLE_RESOURCE_LABELS.contains(labelKeys.get(i).getKey())) { - monitoredResourceBuilder.putLabels( - labelKeys.get(i).getKey(), labelValues.get(i).getValue()); - } else { - metricTagKeys.add(labelKeys.get(i)); - metricTagValues.add(labelValues.get(i)); - } - } - metricTagKeys.add(CLIENT_UID_LABEL_KEY); - metricTagValues.add(LabelValue.create(clientId)); - - com.google.monitoring.v3.TimeSeries.Builder builder = - com.google.monitoring.v3.TimeSeries.newBuilder(); - builder.setResource(monitoredResourceBuilder.build()); - builder.setMetric(createMetric(metricName, metricTagKeys, metricTagValues)); - - return builder; - } - - private static com.google.monitoring.v3.TimeSeries.Builder setupBuilderForGceOrGKEResource( - MetricDescriptor metricDescriptor, - MonitoredResource gceOrGkeMonitoredResource, - TimeSeries timeSeries, - String clientId) { - List labelKeys = metricDescriptor.getLabelKeys(); - String metricName = metricDescriptor.getName(); - List metricTagKeys = new ArrayList<>(); - List metricTagValues = new ArrayList<>(); - - List labelValues = timeSeries.getLabelValues(); - for (int i = 0; i < labelValues.size(); i++) { - metricTagKeys.add(labelKeys.get(i)); - metricTagValues.add(labelValues.get(i)); - } - metricTagKeys.add(CLIENT_UID_LABEL_KEY); - metricTagValues.add(LabelValue.create(clientId)); - - com.google.monitoring.v3.TimeSeries.Builder builder = - com.google.monitoring.v3.TimeSeries.newBuilder(); - builder.setResource(gceOrGkeMonitoredResource); - builder.setMetric(createMetric(metricName, metricTagKeys, metricTagValues)); - - return builder; - } - - static String getProjectId( - MetricDescriptor metricDescriptor, - TimeSeries timeSeries, - MonitoredResource gceOrGkeMonitoredResource) { - if (isBigtableTableMetric(metricDescriptor)) { - return getProjectIdForBigtableTableResource(metricDescriptor, timeSeries); - } else { - return getProjectIdForGceOrGkeResource(gceOrGkeMonitoredResource); - } - } - - static String getProjectIdForBigtableTableResource( - MetricDescriptor metricDescriptor, TimeSeries timeSeries) { - List labelKeys = metricDescriptor.getLabelKeys(); - List labelValues = timeSeries.getLabelValues(); - for (int i = 0; i < labelKeys.size(); i++) { - if (labelKeys.get(i).getKey().equals(BuiltinMeasureConstants.PROJECT_ID.getName())) { - return labelValues.get(i).getValue(); - } - } - throw new IllegalStateException("Can't find project id for the current timeseries"); - } - - static String getProjectIdForGceOrGkeResource(MonitoredResource gceOrGkeMonitoredResource) { - if (!gceOrGkeMonitoredResource.getType().equals(GCE_RESOURCE_TYPE) - && !gceOrGkeMonitoredResource.getType().equals(GKE_RESOURCE_TYPE)) { - throw new IllegalStateException( - "Expected GCE or GKE resource type, but found " + gceOrGkeMonitoredResource); - } - return gceOrGkeMonitoredResource.getLabelsOrThrow(GCE_OR_GKE_PROJECT_ID_KEY); - } - - static String getDefaultTaskValue() { - // Something like '@' - final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); - // If not the expected format then generate a random number. - if (jvmName.indexOf('@') < 1) { - String hostname = "localhost"; - try { - hostname = InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException e) { - logger.log(Level.INFO, "Unable to get the hostname.", e); - } - // Generate a random number and use the same format "random_number@hostname". 
- return "java-" + new SecureRandom().nextInt() + "@" + hostname; - } - return "java-" + UUID.randomUUID() + jvmName; - } - - private static MetricKind createMetricKind(Type type) { - switch (type) { - case CUMULATIVE_DOUBLE: - case CUMULATIVE_INT64: - case CUMULATIVE_DISTRIBUTION: - return MetricKind.CUMULATIVE; - default: - return MetricKind.UNRECOGNIZED; - } - } - - private static com.google.api.MetricDescriptor.ValueType createValueType(Type type) { - switch (type) { - case CUMULATIVE_DOUBLE: - return com.google.api.MetricDescriptor.ValueType.DOUBLE; - case CUMULATIVE_INT64: - return com.google.api.MetricDescriptor.ValueType.INT64; - case CUMULATIVE_DISTRIBUTION: - return com.google.api.MetricDescriptor.ValueType.DISTRIBUTION; - default: - return com.google.api.MetricDescriptor.ValueType.UNRECOGNIZED; - } - } - - private static Metric createMetric( - String metricName, List labelKeys, List labelValues) { - Metric.Builder builder = Metric.newBuilder(); - builder.setType(metricName); - Map stringTagMap = Maps.newHashMap(); - - for (int i = 0; i < labelValues.size(); ++i) { - String value = labelValues.get(i).getValue(); - if (value != null) { - stringTagMap.put(labelKeys.get(i).getKey(), value); - } - } - - builder.putAllLabels(stringTagMap); - return builder.build(); - } - - private static com.google.monitoring.v3.Point createPoint(Point point, Timestamp startTimestamp) { - com.google.monitoring.v3.TimeInterval.Builder timeIntervalBuilder = TimeInterval.newBuilder(); - timeIntervalBuilder.setStartTime(convertTimestamp(startTimestamp)); - timeIntervalBuilder.setEndTime(convertTimestamp(point.getTimestamp())); - - com.google.monitoring.v3.Point.Builder builder = com.google.monitoring.v3.Point.newBuilder(); - builder.setInterval(timeIntervalBuilder.build()); - builder.setValue(createTypedValue(point.getValue())); - return builder.build(); - } - - private static TypedValue createTypedValue(Value value) { - return value.match( - typedValueDoubleFunction, - typedValueLongFunction, - typedValueDistributionFunction, - typedValueSummaryFunction, - Functions.throwIllegalArgumentException()); - } - - private static com.google.api.Distribution createDistribution(Distribution distribution) { - com.google.api.Distribution.Builder builder = - com.google.api.Distribution.newBuilder() - .setBucketOptions(createBucketOptions(distribution.getBucketOptions())) - .setCount(distribution.getCount()) - .setMean( - distribution.getCount() == 0L - ? 0.0D - : distribution.getSum() / (double) distribution.getCount()) - .setSumOfSquaredDeviation(distribution.getSumOfSquaredDeviations()); - setBucketCounts(distribution.getBuckets(), builder); - return builder.build(); - } - - private static BucketOptions createBucketOptions( - @Nullable Distribution.BucketOptions bucketOptions) { - com.google.api.Distribution.BucketOptions.Builder builder = BucketOptions.newBuilder(); - return bucketOptions == null - ? builder.build() - : bucketOptions.match( - bucketOptionsExplicitFunction, Functions.throwIllegalArgumentException()); - } - - private static void setBucketCounts( - List buckets, com.google.api.Distribution.Builder builder) { - builder.addBucketCounts(0L); - - for (Bucket bucket : buckets) { - builder.addBucketCounts(bucket.getCount()); - } - } - - private static com.google.protobuf.Timestamp convertTimestamp(Timestamp censusTimestamp) { - return censusTimestamp.getSeconds() < 0L - ? 
com.google.protobuf.Timestamp.newBuilder().build() - : com.google.protobuf.Timestamp.newBuilder() - .setSeconds(censusTimestamp.getSeconds()) - .setNanos(censusTimestamp.getNanos()) - .build(); - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverStatsExporter.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverStatsExporter.java deleted file mode 100644 index 856353cfd0..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverStatsExporter.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.api.MonitoredResource; -import com.google.api.core.InternalApi; -import com.google.api.gax.core.FixedCredentialsProvider; -import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; -import com.google.auth.Credentials; -import com.google.cloud.monitoring.v3.MetricServiceClient; -import com.google.cloud.monitoring.v3.MetricServiceSettings; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.MoreObjects; -import com.google.common.base.Preconditions; -import io.opencensus.common.Duration; -import io.opencensus.exporter.metrics.util.IntervalMetricReader; -import io.opencensus.exporter.metrics.util.MetricReader; -import io.opencensus.exporter.stats.stackdriver.StackdriverStatsConfiguration; -import io.opencensus.metrics.Metrics; -import java.io.IOException; -import javax.annotation.Nullable; -import javax.annotation.concurrent.GuardedBy; - -@InternalApi -public class BigtableStackdriverStatsExporter { - static final Object lock = new Object(); - - @Nullable - @GuardedBy("lock") - private static BigtableStackdriverStatsExporter instance = null; - - // Default export interval is 1 minute - private static final Duration EXPORT_INTERVAL = Duration.create(60, 0); - - private static final String MONITORING_ENDPOINT = - MoreObjects.firstNonNull( - System.getProperty("bigtable.test-monitoring-endpoint"), - MetricServiceSettings.getDefaultEndpoint()); - - private final IntervalMetricReader intervalMetricReader; - - private BigtableStackdriverStatsExporter( - MetricServiceClient metricServiceClient, - Duration exportInterval, - MonitoredResource gceOrGkeMonitoredResource) { - IntervalMetricReader.Options.Builder intervalMetricReaderOptionsBuilder = - IntervalMetricReader.Options.builder(); - intervalMetricReaderOptionsBuilder.setExportInterval(exportInterval); - this.intervalMetricReader = - IntervalMetricReader.create( - new BigtableCreateTimeSeriesExporter(metricServiceClient, gceOrGkeMonitoredResource), - MetricReader.create( - MetricReader.Options.builder() - .setMetricProducerManager( - Metrics.getExportComponent().getMetricProducerManager()) - .build()), - intervalMetricReaderOptionsBuilder.build()); - } - - public static void register(Credentials credentials) throws 
IOException { - synchronized (lock) { - Preconditions.checkState( - instance == null, "Bigtable Stackdriver stats exporter is already created"); - // Default timeout for creating a client is 1 minute - MetricServiceClient client = createMetricServiceClient(credentials, Duration.create(60L, 0)); - MonitoredResource gceOrGkeMonitoredResource = null; - if (ConsumerEnvironmentUtils.isEnvGce() || ConsumerEnvironmentUtils.isEnvGke()) { - gceOrGkeMonitoredResource = - StackdriverStatsConfiguration.builder().build().getMonitoredResource(); - } - instance = - new BigtableStackdriverStatsExporter(client, EXPORT_INTERVAL, gceOrGkeMonitoredResource); - } - } - - @GuardedBy("lock") - @VisibleForTesting - static MetricServiceClient createMetricServiceClient(Credentials credentials, Duration deadline) - throws IOException { - MetricServiceSettings.Builder settingsBuilder = - MetricServiceSettings.newBuilder() - .setTransportChannelProvider(InstantiatingGrpcChannelProvider.newBuilder().build()); - settingsBuilder.setCredentialsProvider(FixedCredentialsProvider.create(credentials)); - settingsBuilder.setEndpoint(MONITORING_ENDPOINT); - org.threeten.bp.Duration timeout = org.threeten.bp.Duration.ofMillis(deadline.toMillis()); - settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetries(timeout); - return MetricServiceClient.create(settingsBuilder.build()); - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinMeasureConstants.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinMeasureConstants.java deleted file mode 100644 index 59e7511d41..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinMeasureConstants.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import static io.opencensus.stats.Measure.MeasureLong; - -import io.opencensus.tags.TagKey; - -/** Built-in metrics that will be readable under bigtable.googleapis.com/client namespace */ -class BuiltinMeasureConstants { - // Monitored resource TagKeys - static final TagKey PROJECT_ID = TagKey.create("project_id"); - static final TagKey INSTANCE_ID = TagKey.create("instance"); - static final TagKey CLUSTER = TagKey.create("cluster"); - static final TagKey TABLE = TagKey.create("table"); - static final TagKey ZONE = TagKey.create("zone"); - static final TagKey CLIENT_UID = TagKey.create("client_uid"); - - // Metrics TagKeys - static final TagKey APP_PROFILE = TagKey.create("app_profile"); - static final TagKey METHOD = TagKey.create("method"); - static final TagKey STREAMING = TagKey.create("streaming"); - static final TagKey STATUS = TagKey.create("status"); - static final TagKey CLIENT_NAME = TagKey.create("client_name"); - - // Units - private static final String COUNT = "1"; - private static final String MILLISECOND = "ms"; - - // Measurements - static final MeasureLong OPERATION_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/operation_latencies", - "Total time until final operation success or failure, including retries and backoff.", - MILLISECOND); - - static final MeasureLong ATTEMPT_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/attempt_latencies", - "Client observed latency per RPC attempt.", - MILLISECOND); - - static final MeasureLong RETRY_COUNT = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/retry_count", - "The number of additional RPCs sent after the initial attempt.", - COUNT); - - static final MeasureLong FIRST_RESPONSE_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/first_response_latencies", - "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.", - MILLISECOND); - - static final MeasureLong SERVER_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/server_latencies", - "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.", - MILLISECOND); - - static final MeasureLong CONNECTIVITY_ERROR_COUNT = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/connectivity_error_count", - "Number of requests that failed to reach the Google datacenter. (Requests without google response headers).", - COUNT); - - static final MeasureLong APPLICATION_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/application_latencies", - "The latency of the client application consuming available response data.", - MILLISECOND); - - static final MeasureLong THROTTLING_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/throttling_latencies", - "The artificial latency introduced by the client to limit the number of outstanding requests. 
The publishing of the measurement will be delayed until the attempt trailers have been received.", - MILLISECOND); - - static final MeasureLong PER_CONNECTION_ERROR_COUNT = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/per_connection_error_count", - "Distribution of counts of channels per 'error count per minute'.", - COUNT); -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViewConstants.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViewConstants.java deleted file mode 100644 index 82ce61e2d3..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViewConstants.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.APPLICATION_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.APP_PROFILE; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.ATTEMPT_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.CLIENT_NAME; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.CLUSTER; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.CONNECTIVITY_ERROR_COUNT; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.FIRST_RESPONSE_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.INSTANCE_ID; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.METHOD; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.OPERATION_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.PER_CONNECTION_ERROR_COUNT; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.PROJECT_ID; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.RETRY_COUNT; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.SERVER_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.STATUS; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.STREAMING; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.TABLE; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.THROTTLING_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.ZONE; -import static io.opencensus.stats.Aggregation.Distribution; -import static io.opencensus.stats.Aggregation.Sum; - -import com.google.common.collect.ImmutableList; -import io.opencensus.stats.Aggregation; -import io.opencensus.stats.BucketBoundaries; -import io.opencensus.stats.View; - -/** Create built-in metrics views under bigtable.googleapis.com/internal/client namespace */ -class BuiltinViewConstants { - private static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM = - 
Distribution.create( - BucketBoundaries.create( - ImmutableList.of( - 0.0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, - 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, - 250.0, 300.0, 400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, 10000.0, - 20000.0, 50000.0, 100000.0))); - - private static final Aggregation AGGREGATION_RETRY_COUNT = - Distribution.create( - BucketBoundaries.create( - ImmutableList.of( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, - 100.0))); - - private static final Aggregation PER_CONNECTION_ERROR_COUNT_AGGREGATION = - Distribution.create( - BucketBoundaries.create( - ImmutableList.of( - 1.0, - 2.0, - 4.0, - 8.0, - 16.0, - 32.0, - 64.0, - 125.0, - 250.0, - 500.0, - 1_000.0, - 2_000.0, - 4_000.0, - 8_000.0, - 16_000.0, - 32_000.0, - 64_000.0, - 128_000.0, - 250_000.0, - 500_000.0, - 1_000_000.0))); - - private static final Aggregation AGGREGATION_COUNT = Sum.create(); - - static final View OPERATION_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/operation_latencies"), - "Total time until final operation success or failure, including retries and backoff.", - OPERATION_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STREAMING, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View ATTEMPT_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/attempt_latencies"), - "Client observed latency per RPC attempt.", - ATTEMPT_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STREAMING, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View RETRY_COUNT_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/retry_count"), - "The number of additional RPCs sent after the initial attempt.", - RETRY_COUNT, - AGGREGATION_COUNT, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View FIRST_RESPONSE_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/first_response_latencies"), - "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.", - FIRST_RESPONSE_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View SERVER_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/server_latencies"), - "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.", - SERVER_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - STREAMING, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View CONNECTIVITY_ERROR_COUNT_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/connectivity_error_count"), - "Number of requests that failed to reach the Google datacenter. 
(Requests without google response headers).", - CONNECTIVITY_ERROR_COUNT, - AGGREGATION_COUNT, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View APPLICATION_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/application_latencies"), - "The latency of the client application consuming available response data.", - APPLICATION_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, INSTANCE_ID, APP_PROFILE, METHOD, CLIENT_NAME, CLUSTER, ZONE, TABLE)); - - static final View THROTTLING_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/throttling_latencies"), - "The artificial latency introduced by the client to limit the number of outstanding requests. The publishing of the measurement will be delayed until the attempt trailers have been received.", - THROTTLING_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, INSTANCE_ID, APP_PROFILE, METHOD, CLIENT_NAME, CLUSTER, ZONE, TABLE)); - - static final View PER_CONNECTION_ERROR_COUNT_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/per_connection_error_count"), - "Distribution of counts of channels per 'error count per minute'.", - PER_CONNECTION_ERROR_COUNT, - PER_CONNECTION_ERROR_COUNT_AGGREGATION, - ImmutableList.of(PROJECT_ID, INSTANCE_ID, APP_PROFILE, CLIENT_NAME)); -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViews.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViews.java deleted file mode 100644 index 2b91ee60c3..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViews.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.api.core.InternalApi; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; -import io.opencensus.stats.Stats; -import io.opencensus.stats.View; -import io.opencensus.stats.ViewManager; - -/** For registering built-in metric views */ -@InternalApi("For internal use only") -public class BuiltinViews { - - @VisibleForTesting - static final ImmutableSet BIGTABLE_BUILTIN_VIEWS = - ImmutableSet.of( - BuiltinViewConstants.OPERATION_LATENCIES_VIEW, - BuiltinViewConstants.ATTEMPT_LATENCIES_VIEW, - BuiltinViewConstants.RETRY_COUNT_VIEW, - BuiltinViewConstants.FIRST_RESPONSE_LATENCIES_VIEW, - BuiltinViewConstants.SERVER_LATENCIES_VIEW, - BuiltinViewConstants.CONNECTIVITY_ERROR_COUNT_VIEW, - BuiltinViewConstants.APPLICATION_LATENCIES_VIEW, - BuiltinViewConstants.THROTTLING_LATENCIES_VIEW); - // We store views that don't use the Bigtable schema and need different tags in a separate set to - // simplify testing. 
-  static final ImmutableSet<View> NON_BIGTABLE_BUILTIN_VIEWS =
-      ImmutableSet.of(BuiltinViewConstants.PER_CONNECTION_ERROR_COUNT_VIEW);
-
-  @VisibleForTesting
-  void registerPrivateViews(ViewManager viewManager) {
-    for (View view : BIGTABLE_BUILTIN_VIEWS) {
-      viewManager.registerView(view);
-    }
-    for (View view : NON_BIGTABLE_BUILTIN_VIEWS) {
-      viewManager.registerView(view);
-    }
-  }
-
-  public static void registerBigtableBuiltinViews() {
-    ViewManager viewManager = Stats.getViewManager();
-    for (View view : BIGTABLE_BUILTIN_VIEWS) {
-      viewManager.registerView(view);
-    }
-    for (View view : NON_BIGTABLE_BUILTIN_VIEWS) {
-      viewManager.registerView(view);
-    }
-  }
-}
diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils.java
deleted file mode 100644
index 8c84850f6a..0000000000
--- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2024 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.google.cloud.bigtable.stats;
-
-import com.google.common.annotations.VisibleForTesting;
-import io.opencensus.contrib.resource.util.CloudResource;
-import io.opencensus.contrib.resource.util.ContainerResource;
-import io.opencensus.contrib.resource.util.HostResource;
-import io.opencensus.contrib.resource.util.ResourceUtils;
-import io.opencensus.resource.Resource;
-import java.util.Objects;
-
-/** A class for extracting details about consumer environments (GCE and GKE) for metrics. */
-class ConsumerEnvironmentUtils {
-
-  private static ResourceUtilsWrapper resourceUtilsWrapper = new ResourceUtilsWrapper();
-
-  @VisibleForTesting
-  public static void setResourceUtilsWrapper(ResourceUtilsWrapper newResourceUtilsWrapper) {
-    resourceUtilsWrapper = newResourceUtilsWrapper;
-  }
-
-  public static boolean isEnvGce() {
-    Resource resource = resourceUtilsWrapper.detectOpenCensusResource();
-    return Objects.equals(resource.getType(), HostResource.TYPE)
-        && Objects.equals(
-            resource.getLabels().get(CloudResource.PROVIDER_KEY), CloudResource.PROVIDER_GCP);
-  }
-
-  public static boolean isEnvGke() {
-    Resource resource = resourceUtilsWrapper.detectOpenCensusResource();
-    return Objects.equals(resource.getType(), ContainerResource.TYPE)
-        && Objects.equals(
-            resource.getLabels().get(CloudResource.PROVIDER_KEY), CloudResource.PROVIDER_GCP);
-  }
-
-  // We wrap the static ResourceUtils.detectResource() method in a non-static method for mocking.
- @VisibleForTesting - public static class ResourceUtilsWrapper { - public Resource detectOpenCensusResource() { - return ResourceUtils.detectResource(); - } - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapper.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapper.java deleted file mode 100644 index 6bf0988b91..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapper.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.api.core.InternalApi; -import com.google.api.gax.tracing.ApiTracerFactory.OperationType; -import com.google.api.gax.tracing.SpanName; -import io.opencensus.stats.MeasureMap; -import io.opencensus.stats.StatsRecorder; -import io.opencensus.tags.TagContext; -import io.opencensus.tags.TagContextBuilder; -import io.opencensus.tags.TagKey; -import io.opencensus.tags.TagValue; -import io.opencensus.tags.Tagger; -import io.opencensus.tags.Tags; -import java.util.Map; - -/** A wrapper to record built-in metrics */ -@InternalApi("For internal use only") -public class StatsRecorderWrapper { - - private final OperationType operationType; - - private final Tagger tagger; - private final StatsRecorder statsRecorder; - private final TagContext parentContext; - private final SpanName spanName; - private final Map statsAttributes; - - private MeasureMap attemptMeasureMap; - private MeasureMap operationMeasureMap; - - public StatsRecorderWrapper( - OperationType operationType, - SpanName spanName, - Map statsAttributes, - StatsRecorder statsRecorder) { - this.operationType = operationType; - this.tagger = Tags.getTagger(); - this.statsRecorder = statsRecorder; - this.spanName = spanName; - this.parentContext = tagger.getCurrentTagContext(); - this.statsAttributes = statsAttributes; - - this.attemptMeasureMap = statsRecorder.newMeasureMap(); - this.operationMeasureMap = statsRecorder.newMeasureMap(); - } - - public void recordOperation(String status, String tableId, String zone, String cluster) { - TagContextBuilder tagCtx = - newTagContextBuilder(tableId, zone, cluster) - .putLocal(BuiltinMeasureConstants.STATUS, TagValue.create(status)); - - boolean isStreaming = operationType == OperationType.ServerStreaming; - tagCtx.putLocal( - BuiltinMeasureConstants.STREAMING, TagValue.create(Boolean.toString(isStreaming))); - - operationMeasureMap.record(tagCtx.build()); - // Reinitialize a new map - operationMeasureMap = statsRecorder.newMeasureMap(); - } - - public void recordAttempt(String status, String tableId, String zone, String cluster) { - TagContextBuilder tagCtx = - newTagContextBuilder(tableId, zone, cluster) - .putLocal(BuiltinMeasureConstants.STATUS, TagValue.create(status)); - - boolean isStreaming = operationType == OperationType.ServerStreaming; - tagCtx.putLocal( - 
BuiltinMeasureConstants.STREAMING, TagValue.create(Boolean.toString(isStreaming))); - - attemptMeasureMap.record(tagCtx.build()); - // Reinitialize a new map - attemptMeasureMap = statsRecorder.newMeasureMap(); - } - - public void putOperationLatencies(long operationLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.OPERATION_LATENCIES, operationLatency); - } - - public void putAttemptLatencies(long attemptLatency) { - attemptMeasureMap.put(BuiltinMeasureConstants.ATTEMPT_LATENCIES, attemptLatency); - } - - public void putRetryCount(int attemptCount) { - operationMeasureMap.put(BuiltinMeasureConstants.RETRY_COUNT, attemptCount); - } - - public void putApplicationLatencies(long applicationLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.APPLICATION_LATENCIES, applicationLatency); - } - - public void putFirstResponseLatencies(long firstResponseLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.FIRST_RESPONSE_LATENCIES, firstResponseLatency); - } - - public void putGfeLatencies(long serverLatency) { - attemptMeasureMap.put(BuiltinMeasureConstants.SERVER_LATENCIES, serverLatency); - } - - public void putGfeMissingHeaders(long connectivityErrors) { - attemptMeasureMap.put(BuiltinMeasureConstants.CONNECTIVITY_ERROR_COUNT, connectivityErrors); - } - - public void putClientBlockingLatencies(long clientBlockingLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.THROTTLING_LATENCIES, clientBlockingLatency); - } - - private TagContextBuilder newTagContextBuilder(String tableId, String zone, String cluster) { - TagContextBuilder tagContextBuilder = - tagger - .toBuilder(parentContext) - .putLocal(BuiltinMeasureConstants.METHOD, TagValue.create(spanName.toString())) - .putLocal(BuiltinMeasureConstants.TABLE, TagValue.create(tableId)) - .putLocal(BuiltinMeasureConstants.ZONE, TagValue.create(zone)) - .putLocal(BuiltinMeasureConstants.CLUSTER, TagValue.create(cluster)); - for (Map.Entry entry : statsAttributes.entrySet()) { - tagContextBuilder.putLocal(TagKey.create(entry.getKey()), TagValue.create(entry.getValue())); - } - return tagContextBuilder; - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection.java deleted file mode 100644 index 3c335d28bc..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package com.google.cloud.bigtable.stats;
-
-import com.google.api.core.InternalApi;
-import io.opencensus.stats.MeasureMap;
-import io.opencensus.stats.StatsRecorder;
-import io.opencensus.tags.TagContext;
-import io.opencensus.tags.TagContextBuilder;
-import io.opencensus.tags.TagKey;
-import io.opencensus.tags.TagValue;
-import io.opencensus.tags.Tagger;
-import io.opencensus.tags.Tags;
-import java.util.Map;
-
-/** A wrapper to record built-in metrics for connection metrics not tied to operations/RPCs. */
-@InternalApi("For internal use only")
-public class StatsRecorderWrapperForConnection {
-  private final StatsRecorder statsRecorder;
-  private final TagContext tagContext;
-  private MeasureMap perConnectionErrorCountMeasureMap;
-
-  public StatsRecorderWrapperForConnection(
-      Map<String, String> statsAttributes, StatsRecorder statsRecorder) {
-    this.statsRecorder = statsRecorder;
-
-    this.perConnectionErrorCountMeasureMap = statsRecorder.newMeasureMap();
-
-    Tagger tagger = Tags.getTagger();
-    TagContextBuilder tagContextBuilder = tagger.toBuilder(tagger.getCurrentTagContext());
-    for (Map.Entry<String, String> entry : statsAttributes.entrySet()) {
-      tagContextBuilder.putLocal(TagKey.create(entry.getKey()), TagValue.create(entry.getValue()));
-    }
-    this.tagContext = tagContextBuilder.build();
-  }
-
-  public void putAndRecordPerConnectionErrorCount(long errorCount) {
-    perConnectionErrorCountMeasureMap.put(
-        BuiltinMeasureConstants.PER_CONNECTION_ERROR_COUNT, errorCount);
-
-    perConnectionErrorCountMeasureMap.record(tagContext);
-    perConnectionErrorCountMeasureMap = statsRecorder.newMeasureMap();
-  }
-}
diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsWrapper.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsWrapper.java
deleted file mode 100644
index fc6a072d01..0000000000
--- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsWrapper.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright 2022 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.google.cloud.bigtable.stats;
-
-import static com.google.api.gax.tracing.ApiTracerFactory.OperationType;
-
-import com.google.api.core.InternalApi;
-import com.google.api.gax.tracing.SpanName;
-import io.opencensus.stats.Stats;
-import io.opencensus.stats.View;
-import io.opencensus.tags.TagKey;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-/**
- * Wrapper class for accessing opencensus. We use a shaded version of opencensus to avoid polluting
- * the global opencensus namespace. And this provides a facade that will not be relocated.
- */ -@InternalApi("For internal use only") -public class StatsWrapper { - public static StatsRecorderWrapper createRecorder( - OperationType operationType, SpanName spanName, Map statsAttributes) { - return new StatsRecorderWrapper( - operationType, spanName, statsAttributes, Stats.getStatsRecorder()); - } - - public static StatsRecorderWrapperForConnection createRecorderForConnection( - Map statsAttributes) { - return new StatsRecorderWrapperForConnection(statsAttributes, Stats.getStatsRecorder()); - } - - // This is used in integration tests to get the tag value strings from view manager because Stats - // is relocated to com.google.bigtable.veneer.repackaged.io.opencensus. - @InternalApi("Visible for testing") - public static List getOperationLatencyViewTagValueStrings() { - return Stats.getViewManager().getView(BuiltinViewConstants.OPERATION_LATENCIES_VIEW.getName()) - .getAggregationMap().entrySet().stream() - .map(Map.Entry::getKey) - .flatMap(x -> x.stream()) - .map(x -> x.asString()) - .collect(Collectors.toCollection(ArrayList::new)); - } - - // A workaround to run ITBuiltinViewConstantsTest as integration test. Integration test runs after - // the packaging step. Opencensus classes will be relocated when they are packaged but the - // integration test files will not be. So the integration tests can't reference any transitive - // dependencies that have been relocated. - static Map> getBigtableViewToTagMap() { - Map> map = new HashMap<>(); - for (View view : BuiltinViews.BIGTABLE_BUILTIN_VIEWS) { - List tagKeys = view.getColumns(); - map.put( - view.getName().asString(), - tagKeys.stream().map(tagKey -> tagKey.getName()).collect(Collectors.toList())); - } - return map; - } -} diff --git a/google-cloud-bigtable-stats/src/main/resources/META-INF/license/apache2-LICENSE.txt b/google-cloud-bigtable-stats/src/main/resources/META-INF/license/apache2-LICENSE.txt deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/google-cloud-bigtable-stats/src/main/resources/META-INF/license/apache2-LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporterTest.java b/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporterTest.java deleted file mode 100644 index e72b54f0bd..0000000000 --- a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporterTest.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import static com.google.common.truth.Truth.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.google.api.MonitoredResource; -import com.google.api.gax.rpc.UnaryCallable; -import com.google.cloud.monitoring.v3.MetricServiceClient; -import com.google.cloud.monitoring.v3.stub.MetricServiceStub; -import com.google.common.collect.ImmutableMap; -import com.google.monitoring.v3.CreateTimeSeriesRequest; -import com.google.protobuf.Empty; -import io.opencensus.common.Timestamp; -import io.opencensus.contrib.resource.util.CloudResource; -import io.opencensus.contrib.resource.util.ContainerResource; -import io.opencensus.contrib.resource.util.HostResource; -import io.opencensus.metrics.LabelKey; -import io.opencensus.metrics.LabelValue; -import io.opencensus.metrics.export.Metric; -import io.opencensus.metrics.export.MetricDescriptor; -import io.opencensus.metrics.export.Point; -import io.opencensus.metrics.export.TimeSeries; -import io.opencensus.metrics.export.Value; -import io.opencensus.resource.Resource; -import java.util.Arrays; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnit; -import org.mockito.junit.MockitoRule; - -@RunWith(JUnit4.class) -public class BigtableCreateTimeSeriesExporterTest { - - private static final String bigtableProjectId = "fake-bigtable-project"; - private static final String bigtableInstanceId = "fake-bigtable-instance"; - private static final String appProfileId = "default"; - private static final String tableId = "fake-table"; - private static final String bigtableZone = "us-east-1"; - private static final String bigtableCluster = "cluster-1"; - private static final String clientName = "client-name"; - private static final String gceProjectId = "fake-gce-project"; - private static final String gkeProjectId = "fake-gke-project"; - - @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule(); - - @Mock private MetricServiceStub mockMetricServiceStub; - private MetricServiceClient fakeMetricServiceClient; - - @Before - public void setUp() { - - fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub); - } - - @After - public void tearDown() {} - - @Test - public void testTimeSeriesForMetricWithBigtableResource() { - BigtableCreateTimeSeriesExporter exporter = - new BigtableCreateTimeSeriesExporter(fakeMetricServiceClient, null); - ArgumentCaptor argumentCaptor = - ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); - - UnaryCallable mockCallable = mock(UnaryCallable.class); - when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); - when(mockCallable.call(argumentCaptor.capture())).thenReturn(Empty.getDefaultInstance()); - - double fakeValue = 10.0; - Metric fakeMetric = - Metric.create( - MetricDescriptor.create( - "bigtable/test", - "description", - "ms", - MetricDescriptor.Type.CUMULATIVE_DOUBLE, - Arrays.asList( - LabelKey.create(BuiltinMeasureConstants.PROJECT_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.INSTANCE_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.TABLE.getName(), ""), - 
LabelKey.create(BuiltinMeasureConstants.CLUSTER.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.ZONE.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.APP_PROFILE.getName(), ""))), - Arrays.asList( - TimeSeries.create( - Arrays.asList( - LabelValue.create(bigtableProjectId), - LabelValue.create(bigtableInstanceId), - LabelValue.create(tableId), - LabelValue.create(bigtableCluster), - LabelValue.create(bigtableZone), - LabelValue.create(appProfileId)), - Arrays.asList( - Point.create( - Value.doubleValue(fakeValue), - Timestamp.fromMillis(System.currentTimeMillis()))), - Timestamp.fromMillis(System.currentTimeMillis())))); - - exporter.export(Arrays.asList(fakeMetric)); - - CreateTimeSeriesRequest request = argumentCaptor.getValue(); - - assertThat(request.getName()).isEqualTo("projects/" + bigtableProjectId); - assertThat(request.getTimeSeriesList()).hasSize(1); - - com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); - - assertThat(timeSeries.getResource().getLabelsMap()) - .containsExactly( - BuiltinMeasureConstants.PROJECT_ID.getName(), bigtableProjectId, - BuiltinMeasureConstants.INSTANCE_ID.getName(), bigtableInstanceId, - BuiltinMeasureConstants.TABLE.getName(), tableId, - BuiltinMeasureConstants.CLUSTER.getName(), bigtableCluster, - BuiltinMeasureConstants.ZONE.getName(), bigtableZone); - - assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.APP_PROFILE.getName(), appProfileId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsKey(BuiltinMeasureConstants.CLIENT_UID.getName()); - - assertThat(timeSeries.getPoints(0).getValue().getDoubleValue()).isEqualTo(fakeValue); - } - - @Test - public void testTimeSeriesForMetricWithGceResource() { - BigtableCreateTimeSeriesExporter exporter = - new BigtableCreateTimeSeriesExporter( - fakeMetricServiceClient, - MonitoredResource.newBuilder() - .setType(BigtableStackdriverExportUtils.GCE_RESOURCE_TYPE) - .putLabels(BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, gceProjectId) - .putLabels("another-gce-key", "another-gce-value") - .build()); - ArgumentCaptor argumentCaptor = - ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); - - UnaryCallable mockCallable = mock(UnaryCallable.class); - when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); - when(mockCallable.call(argumentCaptor.capture())).thenReturn(Empty.getDefaultInstance()); - - ConsumerEnvironmentUtils.ResourceUtilsWrapper resourceUtilsWrapperMock = - Mockito.mock(ConsumerEnvironmentUtils.ResourceUtilsWrapper.class); - ConsumerEnvironmentUtils.setResourceUtilsWrapper(resourceUtilsWrapperMock); - Mockito.when(resourceUtilsWrapperMock.detectOpenCensusResource()) - .thenReturn( - Resource.create( - HostResource.TYPE, - ImmutableMap.of(CloudResource.PROVIDER_KEY, CloudResource.PROVIDER_GCP))); - - double fakeValue = 10.0; - Metric fakeMetric = - Metric.create( - MetricDescriptor.create( - "bigtable.googleapis.com/internal/client/per_connection_error_count", - "description", - "ms", - MetricDescriptor.Type.CUMULATIVE_DOUBLE, - Arrays.asList( - LabelKey.create(BuiltinMeasureConstants.PROJECT_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.INSTANCE_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.APP_PROFILE.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.CLIENT_NAME.getName(), ""))), - Arrays.asList( - TimeSeries.create( - Arrays.asList( - 
LabelValue.create(bigtableProjectId), - LabelValue.create(bigtableInstanceId), - LabelValue.create(appProfileId), - LabelValue.create(clientName)), - Arrays.asList( - Point.create( - Value.doubleValue(fakeValue), - Timestamp.fromMillis(System.currentTimeMillis()))), - Timestamp.fromMillis(System.currentTimeMillis())))); - - exporter.export(Arrays.asList(fakeMetric)); - - CreateTimeSeriesRequest request = argumentCaptor.getValue(); - - assertThat(request.getName()).isEqualTo("projects/" + gceProjectId); - assertThat(request.getTimeSeriesList()).hasSize(1); - - com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); - - assertThat(timeSeries.getResource().getLabelsMap()) - .containsExactly( - BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, - gceProjectId, - "another-gce-key", - "another-gce-value"); - - assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.PROJECT_ID.getName(), bigtableProjectId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.INSTANCE_ID.getName(), bigtableInstanceId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.APP_PROFILE.getName(), appProfileId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.CLIENT_NAME.getName(), clientName); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsKey(BuiltinMeasureConstants.CLIENT_UID.getName()); - - assertThat(timeSeries.getPoints(0).getValue().getDoubleValue()).isEqualTo(fakeValue); - } - - @Test - public void testTimeSeriesForMetricWithGkeResource() { - BigtableCreateTimeSeriesExporter exporter = - new BigtableCreateTimeSeriesExporter( - fakeMetricServiceClient, - MonitoredResource.newBuilder() - .setType(BigtableStackdriverExportUtils.GKE_RESOURCE_TYPE) - .putLabels(BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, gkeProjectId) - .putLabels("another-gke-key", "another-gke-value") - .build()); - ArgumentCaptor argumentCaptor = - ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); - - UnaryCallable mockCallable = mock(UnaryCallable.class); - when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); - when(mockCallable.call(argumentCaptor.capture())).thenReturn(Empty.getDefaultInstance()); - - ConsumerEnvironmentUtils.ResourceUtilsWrapper resourceUtilsWrapperMock = - Mockito.mock(ConsumerEnvironmentUtils.ResourceUtilsWrapper.class); - ConsumerEnvironmentUtils.setResourceUtilsWrapper(resourceUtilsWrapperMock); - - Mockito.when(resourceUtilsWrapperMock.detectOpenCensusResource()) - .thenReturn( - Resource.create( - ContainerResource.TYPE, - ImmutableMap.of(CloudResource.PROVIDER_KEY, CloudResource.PROVIDER_GCP))); - - double fakeValue = 10.0; - Metric fakeMetric = - Metric.create( - MetricDescriptor.create( - "bigtable.googleapis.com/internal/client/per_connection_error_count", - "description", - "ms", - MetricDescriptor.Type.CUMULATIVE_DOUBLE, - Arrays.asList( - LabelKey.create(BuiltinMeasureConstants.PROJECT_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.INSTANCE_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.APP_PROFILE.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.CLIENT_NAME.getName(), ""))), - Arrays.asList( - TimeSeries.create( - Arrays.asList( - LabelValue.create(bigtableProjectId), - LabelValue.create(bigtableInstanceId), - 
LabelValue.create(appProfileId), - LabelValue.create(clientName)), - Arrays.asList( - Point.create( - Value.doubleValue(fakeValue), - Timestamp.fromMillis(System.currentTimeMillis()))), - Timestamp.fromMillis(System.currentTimeMillis())))); - - exporter.export(Arrays.asList(fakeMetric)); - - CreateTimeSeriesRequest request = argumentCaptor.getValue(); - - assertThat(request.getName()).isEqualTo("projects/" + gkeProjectId); - assertThat(request.getTimeSeriesList()).hasSize(1); - - com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); - - assertThat(timeSeries.getResource().getLabelsMap()) - .containsExactly( - BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, - gkeProjectId, - "another-gke-key", - "another-gke-value"); - - assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.PROJECT_ID.getName(), bigtableProjectId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.INSTANCE_ID.getName(), bigtableInstanceId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.APP_PROFILE.getName(), appProfileId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.CLIENT_NAME.getName(), clientName); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsKey(BuiltinMeasureConstants.CLIENT_UID.getName()); - - assertThat(timeSeries.getPoints(0).getValue().getDoubleValue()).isEqualTo(fakeValue); - } - - private class FakeMetricServiceClient extends MetricServiceClient { - - protected FakeMetricServiceClient(MetricServiceStub stub) { - super(stub); - } - } -} diff --git a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/ITBuiltinViewConstantsTest.java b/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/ITBuiltinViewConstantsTest.java deleted file mode 100644 index c2dcc2a602..0000000000 --- a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/ITBuiltinViewConstantsTest.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package com.google.cloud.bigtable.stats;
-
-import static com.google.common.truth.Truth.assertWithMessage;
-
-import java.util.List;
-import java.util.Map;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
-@RunWith(JUnit4.class)
-public class ITBuiltinViewConstantsTest {
-  @Test
-  public void testBasicTagsExistForAllViews() {
-    Map<String, List<String>> viewToTagMap = StatsWrapper.getBigtableViewToTagMap();
-    for (String view : viewToTagMap.keySet()) {
-      assertWithMessage(view + " should have all basic tags")
-          .that(viewToTagMap.get(view))
-          .containsAtLeast(
-              "project_id", "instance", "app_profile", "method", "zone", "cluster", "table");
-    }
-  }
-}
diff --git a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperTest.java b/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperTest.java
deleted file mode 100644
index 829202510c..0000000000
--- a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperTest.java
+++ /dev/null
@@ -1,513 +0,0 @@
-/*
- * Copyright 2022 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.google.cloud.bigtable.stats;
-
-import static com.google.common.truth.Truth.assertThat;
-
-import com.google.api.gax.tracing.ApiTracerFactory;
-import com.google.api.gax.tracing.SpanName;
-import com.google.common.collect.ImmutableMap;
-import io.opencensus.impl.stats.StatsComponentImpl;
-import io.opencensus.stats.AggregationData;
-import io.opencensus.stats.StatsComponent;
-import io.opencensus.stats.View;
-import io.opencensus.stats.ViewData;
-import io.opencensus.stats.ViewManager;
-import io.opencensus.tags.TagKey;
-import io.opencensus.tags.TagValue;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
-// Can only be run as a unit test. Opencensus classes will be relocated when they are packaged but
-// the integration test files will not be. So the integration tests can't reference any transitive
-// dependencies that have been relocated. To work around this, we'll have to move all the reference
-// to opencensus to StatsWrapper.
-@RunWith(JUnit4.class) -public class StatsRecorderWrapperTest { - - private final String PROJECT_ID = "fake-project"; - private final String INSTANCE_ID = "fake-instance"; - private final String APP_PROFILE_ID = "fake-app-profile"; - - private final String TABLE_ID = "fake-table-id"; - private final String ZONE = "fake-zone"; - private final String CLUSTER = "fake-cluster"; - private final String CLIENT_AND_VERSION = "bigtable-java/fake-version"; - - private final StatsComponent statsComponent = new StatsComponentImpl(); - - @Before - public void setup() { - BuiltinViews views = new BuiltinViews(); - views.registerPrivateViews(statsComponent.getViewManager()); - } - - @Test - public void testStreamingOperation() throws InterruptedException { - StatsRecorderWrapper recorderWrapper = - new StatsRecorderWrapper( - ApiTracerFactory.OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - ImmutableMap.of( - BuiltinMeasureConstants.PROJECT_ID.getName(), - PROJECT_ID, - BuiltinMeasureConstants.INSTANCE_ID.getName(), - INSTANCE_ID, - BuiltinMeasureConstants.APP_PROFILE.getName(), - APP_PROFILE_ID, - BuiltinMeasureConstants.CLIENT_NAME.getName(), - CLIENT_AND_VERSION), - statsComponent.getStatsRecorder()); - - long operationLatency = 1234; - int attemptCount = 2; - long attemptLatency = 56; - long serverLatency = 78; - long applicationLatency = 901; - long connectivityErrorCount = 15; - long throttlingLatency = 50; - long firstResponseLatency = 90; - - recorderWrapper.putOperationLatencies(operationLatency); - recorderWrapper.putRetryCount(attemptCount); - recorderWrapper.putAttemptLatencies(attemptLatency); - recorderWrapper.putApplicationLatencies(applicationLatency); - recorderWrapper.putGfeLatencies(serverLatency); - recorderWrapper.putGfeMissingHeaders(connectivityErrorCount); - recorderWrapper.putFirstResponseLatencies(firstResponseLatency); - recorderWrapper.putClientBlockingLatencies(throttlingLatency); - - recorderWrapper.recordOperation("OK", TABLE_ID, ZONE, CLUSTER); - recorderWrapper.recordAttempt("OK", TABLE_ID, ZONE, CLUSTER); - - Thread.sleep(100); - - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.OPERATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, "OK", - BuiltinMeasureConstants.TABLE, TABLE_ID, - BuiltinMeasureConstants.ZONE, ZONE, - BuiltinMeasureConstants.CLUSTER, CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, "true"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(operationLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.ATTEMPT_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "true"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.RETRY_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, 
- BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.SERVER_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "true", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(serverLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.APPLICATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "true"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(applicationLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.CONNECTIVITY_ERROR_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(connectivityErrorCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.THROTTLING_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, "Bigtable.ReadRows", - BuiltinMeasureConstants.TABLE, TABLE_ID, - BuiltinMeasureConstants.ZONE, ZONE, - BuiltinMeasureConstants.CLUSTER, CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(throttlingLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.FIRST_RESPONSE_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(firstResponseLatency); - } - - @Test - public void testUnaryOperations() throws InterruptedException { - StatsRecorderWrapper recorderWrapper = - new StatsRecorderWrapper( - ApiTracerFactory.OperationType.Unary, - SpanName.of("Bigtable", "MutateRow"), - ImmutableMap.of( - BuiltinMeasureConstants.PROJECT_ID.getName(), PROJECT_ID, - BuiltinMeasureConstants.INSTANCE_ID.getName(), INSTANCE_ID, - BuiltinMeasureConstants.APP_PROFILE.getName(), APP_PROFILE_ID, - BuiltinMeasureConstants.CLIENT_NAME.getName(), CLIENT_AND_VERSION), - statsComponent.getStatsRecorder()); - - long operationLatency = 1234; - int attemptCount = 2; - long attemptLatency = 56; - long serverLatency = 78; - long applicationLatency = 901; - long connectivityErrorCount = 15; - 
long throttlingLatency = 50; - long firstResponseLatency = 90; - - recorderWrapper.putOperationLatencies(operationLatency); - recorderWrapper.putRetryCount(attemptCount); - recorderWrapper.putAttemptLatencies(attemptLatency); - recorderWrapper.putApplicationLatencies(applicationLatency); - recorderWrapper.putGfeLatencies(serverLatency); - recorderWrapper.putGfeMissingHeaders(connectivityErrorCount); - recorderWrapper.putFirstResponseLatencies(firstResponseLatency); - recorderWrapper.putClientBlockingLatencies(throttlingLatency); - - recorderWrapper.recordOperation("UNAVAILABLE", TABLE_ID, ZONE, CLUSTER); - recorderWrapper.recordAttempt("UNAVAILABLE", TABLE_ID, ZONE, CLUSTER); - - Thread.sleep(100); - - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.OPERATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(operationLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.ATTEMPT_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.RETRY_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.SERVER_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(serverLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.APPLICATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(applicationLatency); - assertThat( - 
getAggregationValueAsLong( - BuiltinViewConstants.CONNECTIVITY_ERROR_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(connectivityErrorCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.THROTTLING_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, "Bigtable.MutateRow", - BuiltinMeasureConstants.TABLE, TABLE_ID, - BuiltinMeasureConstants.ZONE, ZONE, - BuiltinMeasureConstants.CLUSTER, CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(throttlingLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.FIRST_RESPONSE_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(firstResponseLatency); - } - - long getAggregationValueAsLong( - View view, - ImmutableMap tags, - String projectId, - String instanceId, - String appProfileId, - ViewManager viewManager) { - ViewData viewData = viewManager.getView(view.getName()); - Map, AggregationData> aggregationMap = - Objects.requireNonNull(viewData).getAggregationMap(); - - List tagValues = new ArrayList<>(); - - for (TagKey column : view.getColumns()) { - if (BuiltinMeasureConstants.PROJECT_ID == column) { - tagValues.add(TagValue.create(projectId)); - } else if (BuiltinMeasureConstants.INSTANCE_ID == column) { - tagValues.add(TagValue.create(instanceId)); - } else if (BuiltinMeasureConstants.APP_PROFILE == column) { - tagValues.add(TagValue.create(appProfileId)); - } else { - tagValues.add(TagValue.create(tags.get(column))); - } - } - - AggregationData aggregationData = aggregationMap.get(tagValues); - - return aggregationData.match( - arg -> (long) arg.getSum(), - AggregationData.SumDataLong::getSum, - arg -> arg.getCount(), - arg -> (long) arg.getMean(), - arg -> (long) arg.getLastValue(), - AggregationData.LastValueDataLong::getLastValue, - arg -> { - throw new UnsupportedOperationException(); - }); - } -} diff --git a/google-cloud-bigtable/clirr-ignored-differences.xml b/google-cloud-bigtable/clirr-ignored-differences.xml index 7ac7946561..034168c2a1 100644 --- a/google-cloud-bigtable/clirr-ignored-differences.xml +++ b/google-cloud-bigtable/clirr-ignored-differences.xml @@ -163,6 +163,12 @@ 8001 com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerBatchedUnaryCallable + + + 7004 + com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory + * + 6001 @@ -188,6 +194,11 @@ * + + 7004 + com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker + * + 7012 com/google/cloud/bigtable/data/v2/models/MutationApi diff --git a/google-cloud-bigtable/pom.xml b/google-cloud-bigtable/pom.xml index b36f9d61bd..311345f2d9 100644 --- a/google-cloud-bigtable/pom.xml +++ b/google-cloud-bigtable/pom.xml 
@@ -2,7 +2,7 @@ 4.0.0 google-cloud-bigtable - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT jar Google Cloud Bigtable https://github.com/googleapis/java-bigtable @@ -12,11 +12,11 @@ com.google.cloud google-cloud-bigtable-parent - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT google-cloud-bigtable @@ -47,14 +47,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import com.google.cloud google-cloud-bigtable-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import @@ -64,19 +64,6 @@ - - com.google.cloud - google-cloud-bigtable-stats - - - - io.opencensus - * - - - com.google.api @@ -229,6 +216,41 @@ threetenbp + + + io.opentelemetry + opentelemetry-api + + + io.opentelemetry + opentelemetry-sdk + + + io.opentelemetry + opentelemetry-sdk-metrics + + + io.opentelemetry + opentelemetry-sdk-common + + + com.google.cloud.opentelemetry + detector-resources-support + + + io.opentelemetry + opentelemetry-sdk-testing + test + + + com.google.cloud + google-cloud-monitoring + + + com.google.api.grpc + proto-google-cloud-monitoring-v3 + + com.google.api @@ -272,23 +294,6 @@ - - com.google.cloud - google-cloud-monitoring - - - - io.perfmark - perfmark-api - - - runtime - - - com.google.api.grpc - proto-google-cloud-monitoring-v3 - runtime - com.google.truth truth diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java index a6aea08983..8a0f3cab40 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java @@ -20,6 +20,6 @@ @InternalApi("For internal use only") public final class Version { // {x-version-update-start:google-cloud-bigtable:current} - public static String VERSION = "2.37.1-SNAPSHOT"; + public static String VERSION = "2.38.1-SNAPSHOT"; // {x-version-update-end} } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/AppProfile.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/AppProfile.java index 2dd75dd5ad..bd7a534640 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/AppProfile.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/AppProfile.java @@ -16,6 +16,7 @@ package com.google.cloud.bigtable.admin.v2.models; import com.google.api.core.InternalApi; +import com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly; import com.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny; import com.google.bigtable.admin.v2.AppProfile.Priority; import com.google.bigtable.admin.v2.AppProfile.StandardIsolation; @@ -81,6 +82,8 @@ public RoutingPolicy getPolicy() { public IsolationPolicy getIsolationPolicy() { if (proto.hasStandardIsolation()) { return new StandardIsolationPolicy(proto.getStandardIsolation()); + } else if (proto.hasDataBoostIsolationReadOnly()) { + return new DataBoostIsolationReadOnlyPolicy(proto.getDataBoostIsolationReadOnly()); } else { // Should never happen because the constructor verifies that one must exist. throw new IllegalStateException(); @@ -409,4 +412,105 @@ public int hashCode() { return Objects.hashCode(proto); } } + + /** Compute Billing Owner specifies how usage should be accounted when using Data Boost. 
*/ + public static enum ComputeBillingOwner { + UNSPECIFIED(DataBoostIsolationReadOnly.ComputeBillingOwner.COMPUTE_BILLING_OWNER_UNSPECIFIED), + HOST_PAYS(DataBoostIsolationReadOnly.ComputeBillingOwner.HOST_PAYS), + UNRECOGNIZED(DataBoostIsolationReadOnly.ComputeBillingOwner.UNRECOGNIZED); + + private final com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .ComputeBillingOwner + proto; + + /** + * Wraps the protobuf. This method is considered an internal implementation detail and not meant + * to be used by applications. + */ + @InternalApi + public static ComputeBillingOwner fromProto( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + proto) { + Preconditions.checkNotNull(proto); + + for (ComputeBillingOwner owner : values()) { + if (owner.proto.equals(proto)) { + return owner; + } + } + + return UNRECOGNIZED; + } + + /** + * Creates the request protobuf. This method is considered an internal implementation detail and + * not meant to be used by applications. + */ + @InternalApi + public DataBoostIsolationReadOnly.ComputeBillingOwner toProto() { + return proto; + } + + ComputeBillingOwner(DataBoostIsolationReadOnly.ComputeBillingOwner proto) { + this.proto = proto; + } + } + + /** + * A Data Boost Read Only {@link IsolationPolicy} for running high-throughput read traffic on your + * Bigtable data without affecting application traffic. Data Boost App Profile needs to be created + * with a ComputeBillingOwner which specifies how usage should be accounted when using Data Boost. + */ + public static class DataBoostIsolationReadOnlyPolicy implements IsolationPolicy { + private final DataBoostIsolationReadOnly proto; + + DataBoostIsolationReadOnlyPolicy(DataBoostIsolationReadOnly proto) { + this.proto = proto; + } + + /** + * Creates a new instance of {@link DataBoostIsolationReadOnlyPolicy} with specified {@link + * ComputeBillingOwner}. + */ + public static DataBoostIsolationReadOnlyPolicy of(ComputeBillingOwner billingOwner) { + return new DataBoostIsolationReadOnlyPolicy( + DataBoostIsolationReadOnly.newBuilder() + .setComputeBillingOwner(billingOwner.toProto()) + .build()); + } + + /** + * Gets the {@link ComputeBillingOwner} on the current {@link DataBoostIsolationReadOnlyPolicy} + * instance. + */ + public ComputeBillingOwner getComputeBillingOwner() { + return ComputeBillingOwner.fromProto(proto.getComputeBillingOwner()); + } + + /** + * Creates the request protobuf. This method is considered an internal implementation detail and + * not meant to be used by applications. 
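To make the new isolation policy concrete, a minimal usage sketch (not part of this patch; the instance, cluster, and app profile ids are placeholders and an existing `BigtableInstanceAdminClient` named `adminClient` is assumed):

```java
// Create an app profile whose read-only traffic runs on Data Boost compute.
// HOST_PAYS attributes Data Boost compute usage to the project hosting the instance.
CreateAppProfileRequest createRequest =
    CreateAppProfileRequest.of("my-instance", "data-boost-profile")
        .setRoutingPolicy(AppProfile.SingleClusterRoutingPolicy.of("my-cluster"))
        .setIsolationPolicy(
            AppProfile.DataBoostIsolationReadOnlyPolicy.of(
                AppProfile.ComputeBillingOwner.HOST_PAYS));
AppProfile dataBoostProfile = adminClient.createAppProfile(createRequest);

// Or switch an existing app profile to Data Boost isolation.
adminClient.updateAppProfile(
    UpdateAppProfileRequest.of("my-instance", "my-app-profile")
        .setIsolationPolicy(
            AppProfile.DataBoostIsolationReadOnlyPolicy.of(
                AppProfile.ComputeBillingOwner.HOST_PAYS)));
```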
+ */ + @InternalApi + public DataBoostIsolationReadOnly toProto() { + return proto; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DataBoostIsolationReadOnlyPolicy that = (DataBoostIsolationReadOnlyPolicy) o; + return Objects.equal(proto, that.proto); + } + + @Override + public int hashCode() { + return Objects.hashCode(proto); + } + } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequest.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequest.java index b3159c3146..2ad236c07b 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequest.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequest.java @@ -97,11 +97,15 @@ public CreateAppProfileRequest setRoutingPolicy(RoutingPolicy routingPolicy) { /** Sets the isolation policy for all read/write requests that use this app profile. */ public CreateAppProfileRequest setIsolationPolicy(IsolationPolicy isolationPolicy) { Preconditions.checkNotNull(isolationPolicy); - if (isolationPolicy instanceof StandardIsolationPolicy) { proto .getAppProfileBuilder() .setStandardIsolation(((StandardIsolationPolicy) isolationPolicy).toProto()); + } else if (isolationPolicy instanceof AppProfile.DataBoostIsolationReadOnlyPolicy) { + proto + .getAppProfileBuilder() + .setDataBoostIsolationReadOnly( + ((AppProfile.DataBoostIsolationReadOnlyPolicy) isolationPolicy).toProto()); } else { throw new IllegalArgumentException("Unknown policy type: " + isolationPolicy); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequest.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequest.java index b9a45a6f78..b5e14f4f2a 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequest.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequest.java @@ -17,6 +17,7 @@ import com.google.api.core.InternalApi; import com.google.cloud.bigtable.admin.v2.internal.NameUtil; +import com.google.cloud.bigtable.admin.v2.models.AppProfile.DataBoostIsolationReadOnlyPolicy; import com.google.cloud.bigtable.admin.v2.models.AppProfile.IsolationPolicy; import com.google.cloud.bigtable.admin.v2.models.AppProfile.MultiClusterRoutingPolicy; import com.google.cloud.bigtable.admin.v2.models.AppProfile.RoutingPolicy; @@ -132,6 +133,13 @@ public UpdateAppProfileRequest setIsolationPolicy(@Nonnull IsolationPolicy isola .getAppProfileBuilder() .setStandardIsolation(((StandardIsolationPolicy) isolationPolicy).toProto()); updateFieldMask(com.google.bigtable.admin.v2.AppProfile.STANDARD_ISOLATION_FIELD_NUMBER); + } else if (isolationPolicy instanceof DataBoostIsolationReadOnlyPolicy) { + proto + .getAppProfileBuilder() + .setDataBoostIsolationReadOnly( + ((DataBoostIsolationReadOnlyPolicy) isolationPolicy).toProto()); + updateFieldMask( + com.google.bigtable.admin.v2.AppProfile.DATA_BOOST_ISOLATION_READ_ONLY_FIELD_NUMBER); } else { throw new IllegalArgumentException("Unknown policy type: " + isolationPolicy); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java 
b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java index c35500a189..9b2f2e345f 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java @@ -19,7 +19,10 @@ import com.google.api.gax.core.BackgroundResource; import com.google.api.gax.rpc.ClientContext; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; +import io.opentelemetry.api.OpenTelemetry; import java.io.IOException; +import java.util.logging.Level; +import java.util.logging.Logger; import javax.annotation.Nonnull; /** @@ -62,8 +65,12 @@ */ @BetaApi("This feature is currently experimental and can change in the future") public final class BigtableDataClientFactory implements AutoCloseable { + + private static final Logger logger = Logger.getLogger(BigtableDataClientFactory.class.getName()); + private final BigtableDataSettings defaultSettings; private final ClientContext sharedClientContext; + private final OpenTelemetry openTelemetry; /** * Create a instance of this factory. @@ -75,13 +82,28 @@ public static BigtableDataClientFactory create(BigtableDataSettings defaultSetti throws IOException { ClientContext sharedClientContext = EnhancedBigtableStub.createClientContext(defaultSettings.getStubSettings()); - return new BigtableDataClientFactory(sharedClientContext, defaultSettings); + OpenTelemetry openTelemetry = null; + try { + // We don't want client side metrics to crash the client, so catch any exception when getting + // the OTEL instance and log the exception instead. + openTelemetry = + EnhancedBigtableStub.getOpenTelemetry( + defaultSettings.getProjectId(), + defaultSettings.getMetricsProvider(), + sharedClientContext.getCredentials()); + } catch (Throwable t) { + logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t); + } + return new BigtableDataClientFactory(sharedClientContext, defaultSettings, openTelemetry); } private BigtableDataClientFactory( - ClientContext sharedClientContext, BigtableDataSettings defaultSettings) { + ClientContext sharedClientContext, + BigtableDataSettings defaultSettings, + OpenTelemetry openTelemetry) { this.sharedClientContext = sharedClientContext; this.defaultSettings = defaultSettings; + this.openTelemetry = openTelemetry; } /** @@ -112,7 +134,7 @@ public BigtableDataClient createDefault() { .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( - defaultSettings.getStubSettings())) + defaultSettings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(defaultSettings, clientContext); @@ -140,7 +162,8 @@ public BigtableDataClient createForAppProfile(@Nonnull String appProfileId) thro sharedClientContext .toBuilder() .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings())) + EnhancedBigtableStub.createBigtableTracerFactory( + settings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(settings, clientContext); } @@ -168,7 +191,8 @@ public BigtableDataClient createForInstance(@Nonnull String projectId, @Nonnull sharedClientContext .toBuilder() .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings())) + EnhancedBigtableStub.createBigtableTracerFactory( + settings.getStubSettings(), openTelemetry)) .build(); return 
BigtableDataClient.createWithClientContext(settings, clientContext); @@ -197,7 +221,8 @@ public BigtableDataClient createForInstance( sharedClientContext .toBuilder() .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings())) + EnhancedBigtableStub.createBigtableTracerFactory( + settings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(settings, clientContext); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java index 701a5e8e49..928159aa6d 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java @@ -25,19 +25,16 @@ import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; import com.google.api.gax.rpc.UnaryCallSettings; import com.google.auth.Credentials; -import com.google.auth.oauth2.GoogleCredentials; import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.stub.BigtableBatchingCallSettings; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; -import com.google.cloud.bigtable.stats.BigtableStackdriverStatsExporter; -import com.google.cloud.bigtable.stats.BuiltinViews; +import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; import com.google.common.base.MoreObjects; import com.google.common.base.Strings; import io.grpc.ManagedChannelBuilder; import java.io.IOException; import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Logger; import javax.annotation.Nonnull; import javax.annotation.Nullable; @@ -77,7 +74,10 @@ public final class BigtableDataSettings { private static final Logger LOGGER = Logger.getLogger(BigtableDataSettings.class.getName()); private static final String BIGTABLE_EMULATOR_HOST_ENV_VAR = "BIGTABLE_EMULATOR_HOST"; - private static final AtomicBoolean BUILTIN_METRICS_REGISTERED = new AtomicBoolean(false); + // This is the legacy credential override used in the deprecated enableBuiltinMetrics method to + // override the default credentials set on the Bigtable client. Keeping it for backward + // compatibility. + @Deprecated @Nullable private static Credentials legacyMetricCredentialOverride; private final EnhancedBigtableStubSettings stubSettings; @@ -197,23 +197,34 @@ public static void enableGfeOpenCensusStats() { com.google.cloud.bigtable.data.v2.stub.metrics.RpcViews.registerBigtableClientGfeViews(); } - /** Register built in metrics. */ - public static void enableBuiltinMetrics() throws IOException { - if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) { - BuiltinViews.registerBigtableBuiltinViews(); - BigtableStackdriverStatsExporter.register(GoogleCredentials.getApplicationDefault()); - } - } + /** + * Register built in metrics. + * + * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default + * now. Please refer to {@link + * BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)} on how to enable or + * disable built-in metrics. + */ + @Deprecated + public static void enableBuiltinMetrics() throws IOException {} /** * Register built in metrics with credentials. 
The credentials need to have metric write access * for all the projects you're publishing to. + * + * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default + * now. Please refer {@link BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)} + * on how to enable or disable built-in metrics. */ + @Deprecated public static void enableBuiltinMetrics(Credentials credentials) throws IOException { - if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) { - BuiltinViews.registerBigtableBuiltinViews(); - BigtableStackdriverStatsExporter.register(credentials); - } + BigtableDataSettings.legacyMetricCredentialOverride = credentials; + } + + /** Get the metrics credentials if it's set by {@link #enableBuiltinMetrics(Credentials)}. */ + @InternalApi + public static Credentials getMetricsCredentials() { + return legacyMetricCredentialOverride; } /** Returns the target project id. */ @@ -278,6 +289,11 @@ public boolean isBulkMutationFlowControlEnabled() { return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled(); } + /** Gets the {@link MetricsProvider}. * */ + public MetricsProvider getMetricsProvider() { + return stubSettings.getMetricsProvider(); + } + /** Returns the underlying RPC settings. */ public EnhancedBigtableStubSettings getStubSettings() { return stubSettings; @@ -527,6 +543,30 @@ public boolean isBulkMutationFlowControlEnabled() { return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled(); } + /** + * Sets the {@link MetricsProvider}. + * + *
By default, this is set to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will + * collect and export client side metrics. + * + *
To disable client side metrics, set it to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}. + * + *
To use a custom OpenTelemetry instance, refer to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to + * set it up. + */ + public Builder setMetricsProvider(MetricsProvider metricsProvider) { + stubSettings.setMetricsProvider(metricsProvider); + return this; + } + + /** Gets the {@link MetricsProvider}. */ + public MetricsProvider getMetricsProvider() { + return stubSettings.getMetricsProvider(); + } + /** * Returns the underlying settings for making RPC calls. The settings should be changed with * care. diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java index ec15c4131a..57d9748cca 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java @@ -15,6 +15,11 @@ */ package com.google.cloud.bigtable.data.v2.stub; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; + import com.google.api.core.ApiFunction; import com.google.api.core.BetaApi; import com.google.api.core.InternalApi; @@ -68,6 +73,7 @@ import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.SampleRowKeysResponse; import com.google.cloud.bigtable.Version; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.internal.JwtCredentialsWithAudience; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.internal.RequestContext; @@ -97,8 +103,12 @@ import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerUnaryCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTracerFactory; import com.google.cloud.bigtable.data.v2.stub.metrics.CompositeTracerFactory; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; +import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider; import com.google.cloud.bigtable.data.v2.stub.metrics.ErrorCountPerConnectionMetricTracker; +import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsTracerFactory; +import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider; import com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants; import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersServerStreamingCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersUnaryCallable; @@ -130,6 +140,8 @@ import io.opencensus.tags.TagValue; import io.opencensus.tags.Tagger; import io.opencensus.tags.Tags; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -137,6 +149,8 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; import 
javax.annotation.Nonnull; import javax.annotation.Nullable; @@ -154,6 +168,9 @@ */ @InternalApi public class EnhancedBigtableStub implements AutoCloseable { + + private static final Logger logger = Logger.getLogger(EnhancedBigtableStub.class.getName()); + private static final String CLIENT_NAME = "Bigtable"; private static final long FLOW_CONTROL_ADJUSTING_INTERVAL_MS = TimeUnit.SECONDS.toMillis(20); private final EnhancedBigtableStubSettings settings; @@ -185,10 +202,25 @@ public class EnhancedBigtableStub implements AutoCloseable { public static EnhancedBigtableStub create(EnhancedBigtableStubSettings settings) throws IOException { - settings = settings.toBuilder().setTracerFactory(createBigtableTracerFactory(settings)).build(); ClientContext clientContext = createClientContext(settings); - - return new EnhancedBigtableStub(settings, clientContext); + OpenTelemetry openTelemetry = null; + try { + // We don't want client side metrics to crash the client, so catch any exception when getting + // the OTEL instance and log the exception instead. + openTelemetry = + getOpenTelemetry( + settings.getProjectId(), + settings.getMetricsProvider(), + clientContext.getCredentials()); + } catch (Throwable t) { + logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t); + } + ClientContext contextWithTracer = + clientContext + .toBuilder() + .setTracerFactory(createBigtableTracerFactory(settings, openTelemetry)) + .build(); + return new EnhancedBigtableStub(settings, contextWithTracer); } public static EnhancedBigtableStub createWithClientContext( @@ -207,15 +239,33 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set // workaround JWT audience issues patchCredentials(builder); + // Fix the credentials so that they can be shared + Credentials credentials = null; + if (builder.getCredentialsProvider() != null) { + credentials = builder.getCredentialsProvider().getCredentials(); + } + builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials)); + InstantiatingGrpcChannelProvider.Builder transportProvider = builder.getTransportChannelProvider() instanceof InstantiatingGrpcChannelProvider ? ((InstantiatingGrpcChannelProvider) builder.getTransportChannelProvider()).toBuilder() : null; + OpenTelemetry openTelemetry = null; + try { + // We don't want client side metrics to crash the client, so catch any exception when getting + // the OTEL instance and log the exception instead. 
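The OpenTelemetry instance resolved below comes from the configured MetricsProvider. A hedged sketch of the custom-provider path on the settings side (project and instance ids are placeholders, and it assumes CustomOpenTelemetryMetricsProvider exposes a create(OpenTelemetry) factory as described in its documentation):

```java
// Reuse an OpenTelemetry SDK the application already owns for Bigtable client metrics.
OpenTelemetry existingOtel = GlobalOpenTelemetry.get(); // or the app's own SDK instance

BigtableDataSettings settings =
    BigtableDataSettings.newBuilder()
        .setProjectId("my-project")
        .setInstanceId("my-instance")
        .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(existingOtel))
        .build();
BigtableDataClient client = BigtableDataClient.create(settings);
```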
+ openTelemetry = + getOpenTelemetry(settings.getProjectId(), settings.getMetricsProvider(), credentials); + } catch (Throwable t) { + logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t); + } ErrorCountPerConnectionMetricTracker errorCountPerConnectionMetricTracker; - if (transportProvider != null) { + // Skip setting up ErrorCountPerConnectionMetricTracker if openTelemetry is null + if (openTelemetry != null && transportProvider != null) { errorCountPerConnectionMetricTracker = - new ErrorCountPerConnectionMetricTracker(createBuiltinAttributes(builder)); + new ErrorCountPerConnectionMetricTracker( + openTelemetry, createBuiltinAttributes(settings)); ApiFunction oldChannelConfigurator = transportProvider.getChannelConfigurator(); transportProvider.setChannelConfigurator( @@ -237,12 +287,6 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set // Inject channel priming if (settings.isRefreshingChannel()) { - // Fix the credentials so that they can be shared - Credentials credentials = null; - if (builder.getCredentialsProvider() != null) { - credentials = builder.getCredentialsProvider().getCredentials(); - } - builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials)); if (transportProvider != null) { transportProvider.setChannelPrimer( @@ -267,13 +311,19 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set } public static ApiTracerFactory createBigtableTracerFactory( - EnhancedBigtableStubSettings settings) { - return createBigtableTracerFactory(settings, Tags.getTagger(), Stats.getStatsRecorder()); + EnhancedBigtableStubSettings settings, @Nullable OpenTelemetry openTelemetry) + throws IOException { + return createBigtableTracerFactory( + settings, Tags.getTagger(), Stats.getStatsRecorder(), openTelemetry); } @VisibleForTesting public static ApiTracerFactory createBigtableTracerFactory( - EnhancedBigtableStubSettings settings, Tagger tagger, StatsRecorder stats) { + EnhancedBigtableStubSettings settings, + Tagger tagger, + StatsRecorder stats, + @Nullable OpenTelemetry openTelemetry) + throws IOException { String projectId = settings.getProjectId(); String instanceId = settings.getInstanceId(); String appProfileId = settings.getAppProfileId(); @@ -284,10 +334,10 @@ public static ApiTracerFactory createBigtableTracerFactory( .put(RpcMeasureConstants.BIGTABLE_INSTANCE_ID, TagValue.create(instanceId)) .put(RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID, TagValue.create(appProfileId)) .build(); - ImmutableMap builtinAttributes = createBuiltinAttributes(settings.toBuilder()); - return new CompositeTracerFactory( - ImmutableList.of( + ImmutableList.Builder tracerFactories = ImmutableList.builder(); + tracerFactories + .add( // Add OpenCensus Tracing new OpencensusTracerFactory( ImmutableMap.builder() @@ -299,22 +349,52 @@ public static ApiTracerFactory createBigtableTracerFactory( .put("gax", GaxGrpcProperties.getGaxGrpcVersion()) .put("grpc", GaxGrpcProperties.getGrpcVersion()) .put("gapic", Version.VERSION) - .build()), - // Add OpenCensus Metrics - MetricsTracerFactory.create(tagger, stats, attributes), - BuiltinMetricsTracerFactory.create(builtinAttributes), - // Add user configured tracer - settings.getTracerFactory())); + .build())) + // Add OpenCensus Metrics + .add(MetricsTracerFactory.create(tagger, stats, attributes)) + // Add user configured tracer + .add(settings.getTracerFactory()); + BuiltinMetricsTracerFactory builtinMetricsTracerFactory = + openTelemetry != null + 
? BuiltinMetricsTracerFactory.create(openTelemetry, createBuiltinAttributes(settings)) + : null; + if (builtinMetricsTracerFactory != null) { + tracerFactories.add(builtinMetricsTracerFactory); + } + return new CompositeTracerFactory(tracerFactories.build()); + } + + @Nullable + public static OpenTelemetry getOpenTelemetry( + String projectId, MetricsProvider metricsProvider, @Nullable Credentials defaultCredentials) + throws IOException { + if (metricsProvider instanceof CustomOpenTelemetryMetricsProvider) { + CustomOpenTelemetryMetricsProvider customMetricsProvider = + (CustomOpenTelemetryMetricsProvider) metricsProvider; + return customMetricsProvider.getOpenTelemetry(); + } else if (metricsProvider instanceof DefaultMetricsProvider) { + Credentials credentials = + BigtableDataSettings.getMetricsCredentials() != null + ? BigtableDataSettings.getMetricsCredentials() + : defaultCredentials; + DefaultMetricsProvider defaultMetricsProvider = (DefaultMetricsProvider) metricsProvider; + return defaultMetricsProvider.getOpenTelemetry(projectId, credentials); + } else if (metricsProvider instanceof NoopMetricsProvider) { + return null; + } + throw new IOException("Invalid MetricsProvider type " + metricsProvider); } - private static ImmutableMap createBuiltinAttributes( - EnhancedBigtableStubSettings.Builder builder) { - return ImmutableMap.builder() - .put("project_id", builder.getProjectId()) - .put("instance", builder.getInstanceId()) - .put("app_profile", builder.getAppProfileId()) - .put("client_name", "bigtable-java/" + Version.VERSION) - .build(); + private static Attributes createBuiltinAttributes(EnhancedBigtableStubSettings settings) { + return Attributes.of( + BIGTABLE_PROJECT_ID_KEY, + settings.getProjectId(), + INSTANCE_ID_KEY, + settings.getInstanceId(), + APP_PROFILE_KEY, + settings.getAppProfileId(), + CLIENT_NAME_KEY, + "bigtable-java/" + Version.VERSION); } private static void patchCredentials(EnhancedBigtableStubSettings.Builder settings) diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java index 9a5027c740..f07a8fb7fc 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java @@ -44,6 +44,8 @@ import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider; +import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor; import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsBatchingDescriptor; import com.google.common.base.MoreObjects; @@ -229,6 +231,8 @@ public class EnhancedBigtableStubSettings extends StubSettings getJwtAudienceMapping() { return jwtAudienceMapping; } + public MetricsProvider getMetricsProvider() { + return metricsProvider; + } + /** * Gets if routing cookie is enabled. If true, client will retry a request with extra metadata * server sent back. @@ -636,6 +645,8 @@ public static class Builder extends StubSettings.Builder jwtAudienceMapping) { return this; } + /** + * Sets the {@link MetricsProvider}. + * + *
By default, this is set to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will + * collect and export client side metrics. + * + *
To disable client side metrics, set it to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}. + * + *
To use a custom OpenTelemetry instance, refer to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to + * set it up. + */ + public Builder setMetricsProvider(MetricsProvider metricsProvider) { + this.metricsProvider = Preconditions.checkNotNull(metricsProvider); + return this; + } + + /** Gets the {@link MetricsProvider}. */ + public MetricsProvider getMetricsProvider() { + return this.metricsProvider; + } + @InternalApi("Used for internal testing") public Map getJwtAudienceMapping() { return jwtAudienceMapping; @@ -1028,6 +1067,11 @@ public EnhancedBigtableStubSettings build() { featureFlags.setRoutingCookie(this.getEnableRoutingCookie()); featureFlags.setRetryInfo(this.getEnableRetryInfo()); + // client_Side_metrics_enabled feature flag is only set when a user is running with a + // DefaultMetricsProvider. This may cause false negatives when a user registered the + // metrics on their CustomOpenTelemetryMetricsProvider. + featureFlags.setClientSideMetricsEnabled( + this.getMetricsProvider() instanceof DefaultMetricsProvider); // Serialize the web64 encode the bigtable feature flags ByteArrayOutputStream boas = new ByteArrayOutputStream(); @@ -1080,6 +1124,7 @@ public String toString() { generateInitialChangeStreamPartitionsSettings) .add("readChangeStreamSettings", readChangeStreamSettings) .add("pingAndWarmSettings", pingAndWarmSettings) + .add("metricsProvider", metricsProvider) .add("parent", super.toString()) .toString(); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java index 6208fce89e..97cc2f73ec 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java @@ -86,7 +86,7 @@ public void call( stopwatch.stop(); if (context.getTracer() instanceof BigtableTracer) { ((BigtableTracer) context.getTracer()) - .batchRequestThrottled(stopwatch.elapsed(TimeUnit.MILLISECONDS)); + .batchRequestThrottled(stopwatch.elapsed(TimeUnit.NANOSECONDS)); } RateLimitingResponseObserver innerObserver = new RateLimitingResponseObserver(limiter, lastQpsChangeTime, responseObserver); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java new file mode 100644 index 0000000000..81473ae4d4 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java @@ -0,0 +1,364 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; + +import com.google.api.MonitoredResource; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.InternalApi; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.auth.Credentials; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.MetricServiceSettings; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.ProjectName; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import org.threeten.bp.Duration; + +/** + * Bigtable Cloud Monitoring OpenTelemetry Exporter. + * + *
The exporter will look for all bigtable owned metrics under bigtable.googleapis.com + * instrumentation scope and upload it via the Google Cloud Monitoring API. + */ +@InternalApi +public final class BigtableCloudMonitoringExporter implements MetricExporter { + + private static final Logger logger = + Logger.getLogger(BigtableCloudMonitoringExporter.class.getName()); + + // This system property can be used to override the monitoring endpoint + // to a different environment. It's meant for internal testing only. + private static final String MONITORING_ENDPOINT = + MoreObjects.firstNonNull( + System.getProperty("bigtable.test-monitoring-endpoint"), + MetricServiceSettings.getDefaultEndpoint()); + + private static final String APPLICATION_RESOURCE_PROJECT_ID = "project_id"; + + private final MetricServiceClient client; + + private final String bigtableProjectId; + private final String taskId; + + // The resource the client application is running on + private final MonitoredResource applicationResource; + + private final AtomicBoolean isShutdown = new AtomicBoolean(false); + + private CompletableResultCode lastExportCode; + + private static final ImmutableList BIGTABLE_TABLE_METRICS = + ImmutableSet.of( + OPERATION_LATENCIES_NAME, + ATTEMPT_LATENCIES_NAME, + SERVER_LATENCIES_NAME, + FIRST_RESPONSE_LATENCIES_NAME, + CLIENT_BLOCKING_LATENCIES_NAME, + APPLICATION_BLOCKING_LATENCIES_NAME, + RETRY_COUNT_NAME, + CONNECTIVITY_ERROR_COUNT_NAME) + .stream() + .map(m -> METER_NAME + m) + .collect(ImmutableList.toImmutableList()); + + private static final ImmutableList APPLICATION_METRICS = + ImmutableSet.of(PER_CONNECTION_ERROR_COUNT_NAME).stream() + .map(m -> METER_NAME + m) + .collect(ImmutableList.toImmutableList()); + + public static BigtableCloudMonitoringExporter create( + String projectId, @Nullable Credentials credentials) throws IOException { + MetricServiceSettings.Builder settingsBuilder = MetricServiceSettings.newBuilder(); + CredentialsProvider credentialsProvider = + Optional.ofNullable(credentials) + .map(FixedCredentialsProvider::create) + .orElse(NoCredentialsProvider.create()); + settingsBuilder.setCredentialsProvider(credentialsProvider); + settingsBuilder.setEndpoint(MONITORING_ENDPOINT); + + org.threeten.bp.Duration timeout = Duration.ofMinutes(1); + // TODO: createServiceTimeSeries needs special handling if the request failed. Leaving + // it as not retried for now. + settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetries(timeout); + + // Detect the resource that the client application is running on. For example, + // this could be a GCE instance or a GKE pod. Currently, we only support GCE instance and + // GKE pod. This method will return null for everything else. 
+ MonitoredResource applicationResource = null; + try { + applicationResource = BigtableExporterUtils.detectResource(); + } catch (Exception e) { + logger.log( + Level.WARNING, + "Failed to detect resource, will skip exporting application level metrics ", + e); + } + + return new BigtableCloudMonitoringExporter( + projectId, + MetricServiceClient.create(settingsBuilder.build()), + applicationResource, + BigtableExporterUtils.getDefaultTaskValue()); + } + + @VisibleForTesting + BigtableCloudMonitoringExporter( + String projectId, + MetricServiceClient client, + @Nullable MonitoredResource applicationResource, + String taskId) { + this.client = client; + this.taskId = taskId; + this.applicationResource = applicationResource; + this.bigtableProjectId = projectId; + } + + @Override + public CompletableResultCode export(Collection collection) { + if (isShutdown.get()) { + logger.log(Level.WARNING, "Exporter is shutting down"); + return CompletableResultCode.ofFailure(); + } + + CompletableResultCode bigtableExportCode = exportBigtableResourceMetrics(collection); + CompletableResultCode applicationExportCode = exportApplicationResourceMetrics(collection); + + lastExportCode = + CompletableResultCode.ofAll(ImmutableList.of(applicationExportCode, bigtableExportCode)); + + return lastExportCode; + } + + /** Export metrics associated with a BigtableTable resource. */ + private CompletableResultCode exportBigtableResourceMetrics(Collection collection) { + // Filter bigtable table metrics + List bigtableMetricData = + collection.stream() + .filter(md -> BIGTABLE_TABLE_METRICS.contains(md.getName())) + .collect(Collectors.toList()); + + // Skips exporting if there's none + if (bigtableMetricData.isEmpty()) { + return CompletableResultCode.ofSuccess(); + } + + // Verifies metrics project id are the same as the bigtable project id set on this client + if (!bigtableMetricData.stream() + .flatMap(metricData -> metricData.getData().getPoints().stream()) + .allMatch(pd -> bigtableProjectId.equals(BigtableExporterUtils.getProjectId(pd)))) { + logger.log(Level.WARNING, "Metric data has different a projectId. Skip exporting."); + return CompletableResultCode.ofFailure(); + } + + List bigtableTimeSeries; + try { + bigtableTimeSeries = + BigtableExporterUtils.convertToBigtableTimeSeries(bigtableMetricData, taskId); + } catch (Throwable e) { + logger.log( + Level.WARNING, + "Failed to convert bigtable table metric data to cloud monitoring timeseries.", + e); + return CompletableResultCode.ofFailure(); + } + + ProjectName projectName = ProjectName.of(bigtableProjectId); + CreateTimeSeriesRequest bigtableRequest = + CreateTimeSeriesRequest.newBuilder() + .setName(projectName.toString()) + .addAllTimeSeries(bigtableTimeSeries) + .build(); + + ApiFuture future = + this.client.createServiceTimeSeriesCallable().futureCall(bigtableRequest); + + CompletableResultCode bigtableExportCode = new CompletableResultCode(); + ApiFutures.addCallback( + future, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable throwable) { + logger.log( + Level.WARNING, + "createServiceTimeSeries request failed for bigtable metrics. ", + throwable); + bigtableExportCode.fail(); + } + + @Override + public void onSuccess(Empty empty) { + bigtableExportCode.succeed(); + } + }, + MoreExecutors.directExecutor()); + + return bigtableExportCode; + } + + /** Export metrics associated with the resource the Application is running on. 
*/ + private CompletableResultCode exportApplicationResourceMetrics( + Collection collection) { + if (applicationResource == null) { + return CompletableResultCode.ofSuccess(); + } + + // Filter application level metrics + List metricData = + collection.stream() + .filter(md -> APPLICATION_METRICS.contains(md.getName())) + .collect(Collectors.toList()); + + // Skip exporting if there's none + if (metricData.isEmpty()) { + return CompletableResultCode.ofSuccess(); + } + + List timeSeries; + try { + timeSeries = + BigtableExporterUtils.convertToApplicationResourceTimeSeries( + metricData, taskId, applicationResource); + } catch (Throwable e) { + logger.log( + Level.WARNING, + "Failed to convert application metric data to cloud monitoring timeseries.", + e); + return CompletableResultCode.ofFailure(); + } + + // Construct the request. The project id will be the project id of the detected monitored + // resource. + ApiFuture gceOrGkeFuture; + CompletableResultCode exportCode = new CompletableResultCode(); + try { + ProjectName projectName = + ProjectName.of(applicationResource.getLabelsOrThrow(APPLICATION_RESOURCE_PROJECT_ID)); + CreateTimeSeriesRequest request = + CreateTimeSeriesRequest.newBuilder() + .setName(projectName.toString()) + .addAllTimeSeries(timeSeries) + .build(); + + gceOrGkeFuture = this.client.createServiceTimeSeriesCallable().futureCall(request); + + ApiFutures.addCallback( + gceOrGkeFuture, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable throwable) { + logger.log( + Level.WARNING, + "createServiceTimeSeries request failed for per connection error metrics.", + throwable); + exportCode.fail(); + } + + @Override + public void onSuccess(Empty empty) { + exportCode.succeed(); + } + }, + MoreExecutors.directExecutor()); + + } catch (Exception e) { + logger.log( + Level.WARNING, + "Failed to get projectName for application resource " + applicationResource); + return CompletableResultCode.ofFailure(); + } + + return exportCode; + } + + @Override + public CompletableResultCode flush() { + if (lastExportCode != null) { + return lastExportCode; + } + return CompletableResultCode.ofSuccess(); + } + + @Override + public CompletableResultCode shutdown() { + if (!isShutdown.compareAndSet(false, true)) { + logger.log(Level.WARNING, "shutdown is called multiple times"); + return CompletableResultCode.ofSuccess(); + } + CompletableResultCode flushResult = flush(); + CompletableResultCode shutdownResult = new CompletableResultCode(); + flushResult.whenComplete( + () -> { + Throwable throwable = null; + try { + client.shutdown(); + } catch (Throwable e) { + logger.log(Level.WARNING, "failed to shutdown the monitoring client", e); + throwable = e; + } + if (throwable != null) { + shutdownResult.fail(); + } else { + shutdownResult.succeed(); + } + }); + return CompletableResultCode.ofAll(Arrays.asList(flushResult, shutdownResult)); + } + + /** + * For Google Cloud Monitoring always return CUMULATIVE to keep track of the cumulative value of a + * metric over time. 
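For orientation only, a sketch of how the pieces fit together (the exporter is marked @InternalApi, so this is illustrative rather than a supported integration, and the project id is a placeholder): a periodic reader attached to an application-owned OpenTelemetry SDK drives this exporter and respects the CUMULATIVE temporality declared below.

```java
// Attach the exporter to an OpenTelemetry SDK via a periodic reader.
MetricExporter exporter =
    BigtableCloudMonitoringExporter.create(
        "my-project", GoogleCredentials.getApplicationDefault());

SdkMeterProvider meterProvider =
    SdkMeterProvider.builder()
        .registerMetricReader(PeriodicMetricReader.builder(exporter).build())
        .build();

OpenTelemetry openTelemetry =
    OpenTelemetrySdk.builder().setMeterProvider(meterProvider).build();
```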
+ */ + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return AggregationTemporality.CUMULATIVE; + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java new file mode 100644 index 0000000000..5bf6688e17 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java @@ -0,0 +1,367 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.api.Distribution.BucketOptions; +import static com.google.api.Distribution.BucketOptions.Explicit; +import static com.google.api.MetricDescriptor.MetricKind; +import static com.google.api.MetricDescriptor.MetricKind.CUMULATIVE; +import static com.google.api.MetricDescriptor.MetricKind.GAUGE; +import static com.google.api.MetricDescriptor.MetricKind.UNRECOGNIZED; +import static com.google.api.MetricDescriptor.ValueType; +import static com.google.api.MetricDescriptor.ValueType.DISTRIBUTION; +import static com.google.api.MetricDescriptor.ValueType.DOUBLE; +import static com.google.api.MetricDescriptor.ValueType.INT64; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; + +import com.google.api.Distribution; +import com.google.api.Metric; +import com.google.api.MonitoredResource; +import com.google.cloud.opentelemetry.detection.AttributeKeys; +import com.google.cloud.opentelemetry.detection.DetectedPlatform; +import com.google.cloud.opentelemetry.detection.GCPPlatformDetector; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableSet; +import com.google.monitoring.v3.Point; +import com.google.monitoring.v3.TimeInterval; +import com.google.monitoring.v3.TimeSeries; +import com.google.monitoring.v3.TypedValue; +import com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.DoublePointData; +import 
io.opentelemetry.sdk.metrics.data.HistogramData; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.MetricDataType; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.metrics.data.SumData; +import java.lang.management.ManagementFactory; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** Utils to convert OpenTelemetry types to Google Cloud Monitoring types. */ +class BigtableExporterUtils { + + private static final Logger logger = Logger.getLogger(BigtableExporterUtils.class.getName()); + + private static final String BIGTABLE_RESOURCE_TYPE = "bigtable_client_raw"; + + // These metric labels will be promoted to the bigtable_table monitored resource fields + private static final Set<AttributeKey<String>> BIGTABLE_PROMOTED_RESOURCE_LABELS = + ImmutableSet.of( + BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, TABLE_ID_KEY, CLUSTER_ID_KEY, ZONE_ID_KEY); + + private BigtableExporterUtils() {} + + /** + * In most cases this should look like java-${UUID}@${hostname}. The hostname will be retrieved + * from the jvm name and fall back to the local hostname. + */ + static String getDefaultTaskValue() { + // Something like '<pid>@<hostname>' + final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); + // If jvm doesn't have the expected format, fall back to the local hostname + if (jvmName.indexOf('@') < 1) { + String hostname = "localhost"; + try { + hostname = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + logger.log(Level.INFO, "Unable to get the hostname.", e); + } + // Generate a random number and use the same format "random_number@hostname".
+ return "java-" + UUID.randomUUID() + "@" + hostname; + } + return "java-" + UUID.randomUUID() + jvmName; + } + + static String getProjectId(PointData pointData) { + return pointData.getAttributes().get(BIGTABLE_PROJECT_ID_KEY); + } + + static List convertToBigtableTimeSeries(List collection, String taskId) { + List allTimeSeries = new ArrayList<>(); + + for (MetricData metricData : collection) { + if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) { + // Filter out metric data for instruments that are not part of the bigtable builtin metrics + continue; + } + metricData.getData().getPoints().stream() + .map(pointData -> convertPointToBigtableTimeSeries(metricData, pointData, taskId)) + .forEach(allTimeSeries::add); + } + + return allTimeSeries; + } + + static List convertToApplicationResourceTimeSeries( + Collection collection, String taskId, MonitoredResource applicationResource) { + Preconditions.checkNotNull( + applicationResource, + "convert application metrics is called when the supported resource is not detected"); + List allTimeSeries = new ArrayList<>(); + for (MetricData metricData : collection) { + if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) { + // Filter out metric data for instruments that are not part of the bigtable builtin metrics + continue; + } + metricData.getData().getPoints().stream() + .map( + pointData -> + convertPointToApplicationResourceTimeSeries( + metricData, pointData, taskId, applicationResource)) + .forEach(allTimeSeries::add); + } + return allTimeSeries; + } + + @Nullable + static MonitoredResource detectResource() { + GCPPlatformDetector detector = GCPPlatformDetector.DEFAULT_INSTANCE; + DetectedPlatform detectedPlatform = detector.detectPlatform(); + MonitoredResource monitoredResource = null; + try { + switch (detectedPlatform.getSupportedPlatform()) { + case GOOGLE_COMPUTE_ENGINE: + monitoredResource = + createGceMonitoredResource( + detectedPlatform.getProjectId(), detectedPlatform.getAttributes()); + break; + case GOOGLE_KUBERNETES_ENGINE: + monitoredResource = + createGkeMonitoredResource( + detectedPlatform.getProjectId(), detectedPlatform.getAttributes()); + break; + } + } catch (IllegalStateException e) { + logger.log( + Level.WARNING, + "Failed to create monitored resource for " + detectedPlatform.getSupportedPlatform(), + e); + } + return monitoredResource; + } + + private static MonitoredResource createGceMonitoredResource( + String projectId, Map attributes) { + return MonitoredResource.newBuilder() + .setType("gce_instance") + .putLabels("project_id", projectId) + .putLabels("instance_id", getAttribute(attributes, AttributeKeys.GCE_INSTANCE_ID)) + .putLabels("zone", getAttribute(attributes, AttributeKeys.GCE_AVAILABILITY_ZONE)) + .build(); + } + + private static MonitoredResource createGkeMonitoredResource( + String projectId, Map attributes) { + return MonitoredResource.newBuilder() + .setType("k8s_container") + .putLabels("project_id", projectId) + .putLabels("location", getAttribute(attributes, AttributeKeys.GKE_CLUSTER_LOCATION)) + .putLabels("cluster_name", getAttribute(attributes, AttributeKeys.GKE_CLUSTER_NAME)) + .putLabels("namespace_name", MoreObjects.firstNonNull(System.getenv("NAMESPACE"), "")) + .putLabels("pod_name", MoreObjects.firstNonNull(System.getenv("HOSTNAME"), "")) + .putLabels("container_name", MoreObjects.firstNonNull(System.getenv("CONTAINER_NAME"), "")) + .build(); + } + + private static String getAttribute(Map attributes, String key) { + String value = 
attributes.get(key); + if (value == null) { + throw new IllegalStateException( + "Required attribute " + key + " does not exist in the attributes map " + attributes); + } + return value; + } + + private static TimeSeries convertPointToBigtableTimeSeries( + MetricData metricData, PointData pointData, String taskId) { + TimeSeries.Builder builder = + TimeSeries.newBuilder() + .setMetricKind(convertMetricKind(metricData)) + .setValueType(convertValueType(metricData.getType())); + Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName()); + + Attributes attributes = pointData.getAttributes(); + MonitoredResource.Builder monitoredResourceBuilder = + MonitoredResource.newBuilder().setType(BIGTABLE_RESOURCE_TYPE); + + for (AttributeKey key : attributes.asMap().keySet()) { + if (BIGTABLE_PROMOTED_RESOURCE_LABELS.contains(key)) { + monitoredResourceBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } else { + metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } + } + + builder.setResource(monitoredResourceBuilder.build()); + + metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId); + builder.setMetric(metricBuilder.build()); + + TimeInterval timeInterval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos())) + .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos())) + .build(); + + builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval)); + + return builder.build(); + } + + private static TimeSeries convertPointToApplicationResourceTimeSeries( + MetricData metricData, + PointData pointData, + String taskId, + MonitoredResource applicationResource) { + TimeSeries.Builder builder = + TimeSeries.newBuilder() + .setMetricKind(convertMetricKind(metricData)) + .setValueType(convertValueType(metricData.getType())) + .setResource(applicationResource); + + Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName()); + + Attributes attributes = pointData.getAttributes(); + for (AttributeKey key : attributes.asMap().keySet()) { + metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } + + metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId); + builder.setMetric(metricBuilder.build()); + + TimeInterval timeInterval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos())) + .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos())) + .build(); + + builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval)); + return builder.build(); + } + + private static MetricKind convertMetricKind(MetricData metricData) { + switch (metricData.getType()) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return convertHistogramType(metricData.getHistogramData()); + case LONG_GAUGE: + case DOUBLE_GAUGE: + return GAUGE; + case LONG_SUM: + return convertSumDataType(metricData.getLongSumData()); + case DOUBLE_SUM: + return convertSumDataType(metricData.getDoubleSumData()); + default: + return UNRECOGNIZED; + } + } + + private static MetricKind convertHistogramType(HistogramData histogramData) { + if (histogramData.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + private static MetricKind convertSumDataType(SumData sum) { + if (!sum.isMonotonic()) { + return GAUGE; + } + if (sum.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + 
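Taken together with convertValueType below, these helpers decide the MetricKind/ValueType pair for every exported point. As a hedged, editorial sketch (not part of this patch; the class name and values are invented for illustration), a monotonic cumulative long sum such as retry_count comes out as a CUMULATIVE/INT64 point on the Cloud Monitoring side:

```java
import com.google.api.MetricDescriptor.MetricKind;
import com.google.api.MetricDescriptor.ValueType;
import com.google.monitoring.v3.Point;
import com.google.monitoring.v3.TypedValue;

// Editorial sketch only: the typing a monotonic, cumulative LONG_SUM point receives.
public class RetryCountMappingSketch {
  public static void main(String[] args) {
    MetricKind kind = MetricKind.CUMULATIVE; // monotonic + cumulative sum -> CUMULATIVE
    ValueType valueType = ValueType.INT64;   // LONG_SUM -> INT64
    Point point =
        Point.newBuilder()
            .setValue(TypedValue.newBuilder().setInt64Value(3L)) // e.g. 3 retries so far
            .build();
    System.out.println(kind + "/" + valueType + " value=" + point.getValue().getInt64Value());
  }
}
```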
private static ValueType convertValueType(MetricDataType metricDataType) { + switch (metricDataType) { + case LONG_GAUGE: + case LONG_SUM: + return INT64; + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return DOUBLE; + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return DISTRIBUTION; + default: + return ValueType.UNRECOGNIZED; + } + } + + private static Point createPoint( + MetricDataType type, PointData pointData, TimeInterval timeInterval) { + Point.Builder builder = Point.newBuilder().setInterval(timeInterval); + switch (type) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return builder + .setValue( + TypedValue.newBuilder() + .setDistributionValue(convertHistogramData((HistogramPointData) pointData)) + .build()) + .build(); + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return builder + .setValue( + TypedValue.newBuilder() + .setDoubleValue(((DoublePointData) pointData).getValue()) + .build()) + .build(); + case LONG_GAUGE: + case LONG_SUM: + return builder + .setValue(TypedValue.newBuilder().setInt64Value(((LongPointData) pointData).getValue())) + .build(); + default: + logger.log(Level.WARNING, "unsupported metric type"); + return builder.build(); + } + } + + private static Distribution convertHistogramData(HistogramPointData pointData) { + return Distribution.newBuilder() + .setCount(pointData.getCount()) + .setMean(pointData.getCount() == 0L ? 0.0D : pointData.getSum() / pointData.getCount()) + .setBucketOptions( + BucketOptions.newBuilder() + .setExplicitBuckets(Explicit.newBuilder().addAllBounds(pointData.getBoundaries()))) + .addAllBucketCounts(pointData.getCounts()) + .build(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java index 1cda49934c..3b2242385a 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java @@ -42,7 +42,7 @@ public void streamCreated(Attributes transportAttrs, Metadata headers) { @Override public void outboundMessageSent(int seqNo, long optionalWireSize, long optionalUncompressedSize) { - tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.MILLISECONDS)); + tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.NANOSECONDS)); } static class Factory extends ClientStreamTracer.Factory { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java new file mode 100644 index 0000000000..d85300828b --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java @@ -0,0 +1,220 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.api.core.InternalApi; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.sdk.metrics.Aggregation; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.View; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** Defines Bigtable built-in metrics scope, attributes, metric names and views. */ +@InternalApi +public class BuiltinMetricsConstants { + + // Metric attribute keys for monitored resource + public static final AttributeKey<String> BIGTABLE_PROJECT_ID_KEY = + AttributeKey.stringKey("project_id"); + public static final AttributeKey<String> INSTANCE_ID_KEY = AttributeKey.stringKey("instance"); + public static final AttributeKey<String> TABLE_ID_KEY = AttributeKey.stringKey("table"); + public static final AttributeKey<String> CLUSTER_ID_KEY = AttributeKey.stringKey("cluster"); + public static final AttributeKey<String> ZONE_ID_KEY = AttributeKey.stringKey("zone"); + + // Metric attribute keys for labels + // We need to access APP_PROFILE_KEY in EnhancedBigtableStubSettings and STREAMING_KEY in + // IT tests, so they're public. + public static final AttributeKey<String> APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); + public static final AttributeKey<Boolean> STREAMING_KEY = AttributeKey.booleanKey("streaming"); + public static final AttributeKey<String> CLIENT_NAME_KEY = AttributeKey.stringKey("client_name"); + static final AttributeKey<String> METHOD_KEY = AttributeKey.stringKey("method"); + static final AttributeKey<String> STATUS_KEY = AttributeKey.stringKey("status"); + static final AttributeKey<String> CLIENT_UID_KEY = AttributeKey.stringKey("client_uid"); + + // Metric names + public static final String OPERATION_LATENCIES_NAME = "operation_latencies"; + public static final String ATTEMPT_LATENCIES_NAME = "attempt_latencies"; + static final String RETRY_COUNT_NAME = "retry_count"; + static final String CONNECTIVITY_ERROR_COUNT_NAME = "connectivity_error_count"; + static final String SERVER_LATENCIES_NAME = "server_latencies"; + static final String FIRST_RESPONSE_LATENCIES_NAME = "first_response_latencies"; + static final String APPLICATION_BLOCKING_LATENCIES_NAME = "application_latencies"; + static final String CLIENT_BLOCKING_LATENCIES_NAME = "throttling_latencies"; + static final String PER_CONNECTION_ERROR_COUNT_NAME = "per_connection_error_count"; + + // Buckets under 100,000 are identical to buckets for server side metrics handler_latencies. + // Extending client side buckets up to 3,200,000.
+ private static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM = + Aggregation.explicitBucketHistogram( + ImmutableList.of( + 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, + 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0, + 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0, + 400000.0, 800000.0, 1600000.0, 3200000.0)); // max is 53.3 minutes + + private static final Aggregation AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM = + Aggregation.explicitBucketHistogram( + ImmutableList.of( + 1.0, + 2.0, + 4.0, + 8.0, + 16.0, + 32.0, + 64.0, + 125.0, + 250.0, + 500.0, + 1_000.0, + 2_000.0, + 4_000.0, + 8_000.0, + 16_000.0, + 32_000.0, + 64_000.0, + 128_000.0, + 250_000.0, + 500_000.0, + 1_000_000.0)); + + public static final String METER_NAME = "bigtable.googleapis.com/internal/client/"; + + static final Set COMMON_ATTRIBUTES = + ImmutableSet.of( + BIGTABLE_PROJECT_ID_KEY, + INSTANCE_ID_KEY, + TABLE_ID_KEY, + APP_PROFILE_KEY, + CLUSTER_ID_KEY, + ZONE_ID_KEY, + METHOD_KEY, + CLIENT_NAME_KEY); + + static void defineView( + ImmutableMap.Builder viewMap, + String id, + Aggregation aggregation, + InstrumentType type, + String unit, + Set attributes) { + InstrumentSelector selector = + InstrumentSelector.builder() + .setName(id) + .setMeterName(METER_NAME) + .setType(type) + .setUnit(unit) + .build(); + Set attributesFilter = + ImmutableSet.builder() + .addAll( + COMMON_ATTRIBUTES.stream().map(AttributeKey::getKey).collect(Collectors.toSet())) + .addAll(attributes.stream().map(AttributeKey::getKey).collect(Collectors.toSet())) + .build(); + View view = + View.builder() + .setName(METER_NAME + id) + .setAggregation(aggregation) + .setAttributeFilter(attributesFilter) + .build(); + + viewMap.put(selector, view); + } + + public static Map getAllViews() { + ImmutableMap.Builder views = ImmutableMap.builder(); + + defineView( + views, + OPERATION_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder() + .addAll(COMMON_ATTRIBUTES) + .add(STREAMING_KEY, STATUS_KEY) + .build()); + defineView( + views, + ATTEMPT_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder() + .addAll(COMMON_ATTRIBUTES) + .add(STREAMING_KEY, STATUS_KEY) + .build()); + defineView( + views, + SERVER_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + defineView( + views, + FIRST_RESPONSE_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + defineView( + views, + APPLICATION_BLOCKING_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build()); + defineView( + views, + CLIENT_BLOCKING_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build()); + defineView( + views, + RETRY_COUNT_NAME, + Aggregation.sum(), + InstrumentType.COUNTER, + "1", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + defineView( + views, + CONNECTIVITY_ERROR_COUNT_NAME, + Aggregation.sum(), + InstrumentType.COUNTER, + "1", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + + defineView( + views, 
+ PER_CONNECTION_ERROR_COUNT_NAME, + AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM, + InstrumentType.HISTOGRAM, + "1", + ImmutableSet.builder() + .add(BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, APP_PROFILE_KEY, CLIENT_NAME_KEY) + .build()); + + return views.build(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java index 2d8262a93e..abd214d760 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java @@ -16,13 +16,22 @@ package com.google.cloud.bigtable.data.v2.stub.metrics; import static com.google.api.gax.tracing.ApiTracerFactory.OperationType; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; import com.google.api.gax.retrying.ServerStreamingAttemptException; import com.google.api.gax.tracing.SpanName; -import com.google.cloud.bigtable.stats.StatsRecorderWrapper; -import com.google.common.annotations.VisibleForTesting; +import com.google.cloud.bigtable.Version; import com.google.common.base.Stopwatch; import com.google.common.math.IntMath; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.LongCounter; import java.util.concurrent.CancellationException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -37,8 +46,7 @@ */ class BuiltinMetricsTracer extends BigtableTracer { - private final StatsRecorderWrapper recorder; - + private static final String NAME = "java-bigtable/" + Version.VERSION; private final OperationType operationType; private final SpanName spanName; @@ -64,21 +72,56 @@ class BuiltinMetricsTracer extends BigtableTracer { private boolean flowControlIsDisabled = false; - private AtomicInteger requestLeft = new AtomicInteger(0); + private final AtomicInteger requestLeft = new AtomicInteger(0); // Monitored resource labels private String tableId = "unspecified"; private String zone = "global"; private String cluster = "unspecified"; - private AtomicLong totalClientBlockingTime = new AtomicLong(0); + private final AtomicLong totalClientBlockingTime = new AtomicLong(0); + + private final Attributes baseAttributes; + + private Long serverLatencies = null; + + // OpenCensus (and server) histogram buckets use [start, end), however OpenTelemetry uses (start, + // end]. To work around this, we measure all the latencies in nanoseconds and convert them + // to milliseconds and use DoubleHistogram. This should minimize the chance of a data + // point fall on the bucket boundary that causes off by one errors. 
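The comment above captures the unit-handling decision for this tracer. A minimal, editorial sketch of the same arithmetic (not part of this patch; the class below is invented for illustration and only assumes Guava's Stopwatch, which this file already uses):

```java
import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

// Editorial sketch: measure in nanoseconds, record as a fractional-millisecond double,
// mirroring the convertToMs helper introduced later in this class.
public class LatencyUnitSketch {
  static double toMillis(long nanos) {
    return nanos * 1e-6;
  }

  public static void main(String[] args) throws InterruptedException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    Thread.sleep(2); // stand-in for a traced RPC attempt
    double attemptMs = toMillis(stopwatch.elapsed(TimeUnit.NANOSECONDS));
    System.out.println("attempt latency (ms): " + attemptMs); // e.g. 2.1837, not exactly 2
  }
}
```

Recording fractional milliseconds keeps the millisecond bucket bounds meaningful while making it unlikely that a measurement lands exactly on a bucket boundary.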
+ private final DoubleHistogram operationLatenciesHistogram; + private final DoubleHistogram attemptLatenciesHistogram; + private final DoubleHistogram serverLatenciesHistogram; + private final DoubleHistogram firstResponseLatenciesHistogram; + private final DoubleHistogram clientBlockingLatenciesHistogram; + private final DoubleHistogram applicationBlockingLatenciesHistogram; + private final LongCounter connectivityErrorCounter; + private final LongCounter retryCounter; - @VisibleForTesting BuiltinMetricsTracer( - OperationType operationType, SpanName spanName, StatsRecorderWrapper recorder) { + OperationType operationType, + SpanName spanName, + Attributes attributes, + DoubleHistogram operationLatenciesHistogram, + DoubleHistogram attemptLatenciesHistogram, + DoubleHistogram serverLatenciesHistogram, + DoubleHistogram firstResponseLatenciesHistogram, + DoubleHistogram clientBlockingLatenciesHistogram, + DoubleHistogram applicationBlockingLatenciesHistogram, + LongCounter connectivityErrorCounter, + LongCounter retryCounter) { this.operationType = operationType; this.spanName = spanName; - this.recorder = recorder; + this.baseAttributes = attributes; + + this.operationLatenciesHistogram = operationLatenciesHistogram; + this.attemptLatenciesHistogram = attemptLatenciesHistogram; + this.serverLatenciesHistogram = serverLatenciesHistogram; + this.firstResponseLatenciesHistogram = firstResponseLatenciesHistogram; + this.clientBlockingLatenciesHistogram = clientBlockingLatenciesHistogram; + this.applicationBlockingLatenciesHistogram = applicationBlockingLatenciesHistogram; + this.connectivityErrorCounter = connectivityErrorCounter; + this.retryCounter = retryCounter; } @Override @@ -203,13 +246,8 @@ public int getAttempt() { @Override public void recordGfeMetadata(@Nullable Long latency, @Nullable Throwable throwable) { - // Record the metrics and put in the map after the attempt is done, so we can have cluster and - // zone information if (latency != null) { - recorder.putGfeLatencies(latency); - recorder.putGfeMissingHeaders(0); - } else { - recorder.putGfeMissingHeaders(1); + serverLatencies = latency; } } @@ -220,13 +258,13 @@ public void setLocations(String zone, String cluster) { } @Override - public void batchRequestThrottled(long throttledTimeMs) { - totalClientBlockingTime.addAndGet(throttledTimeMs); + public void batchRequestThrottled(long throttledTimeNanos) { + totalClientBlockingTime.addAndGet(Duration.ofNanos(throttledTimeNanos).toMillis()); } @Override - public void grpcChannelQueuedLatencies(long queuedTimeMs) { - totalClientBlockingTime.addAndGet(queuedTimeMs); + public void grpcChannelQueuedLatencies(long queuedTimeNanos) { + totalClientBlockingTime.addAndGet(queuedTimeNanos); } @Override @@ -239,26 +277,43 @@ private void recordOperationCompletion(@Nullable Throwable status) { return; } operationTimer.stop(); - long operationLatency = operationTimer.elapsed(TimeUnit.MILLISECONDS); + + boolean isStreaming = operationType == OperationType.ServerStreaming; + String statusStr = Util.extractStatus(status); + + // Publish metric data with all the attributes. The attributes get filtered in + // BuiltinMetricsConstants when we construct the views. 
+ Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, tableId) + .put(CLUSTER_ID_KEY, cluster) + .put(ZONE_ID_KEY, zone) + .put(METHOD_KEY, spanName.toString()) + .put(CLIENT_NAME_KEY, NAME) + .put(STREAMING_KEY, isStreaming) + .put(STATUS_KEY, statusStr) + .build(); + long operationLatencyNano = operationTimer.elapsed(TimeUnit.NANOSECONDS); // Only record when retry count is greater than 0 so the retry // graph will be less confusing if (attemptCount > 1) { - recorder.putRetryCount(attemptCount - 1); + retryCounter.add(attemptCount - 1, attributes); } + operationLatenciesHistogram.record(convertToMs(operationLatencyNano), attributes); + // serverLatencyTimer should already be stopped in recordAttemptCompletion - recorder.putOperationLatencies(operationLatency); - recorder.putApplicationLatencies( - Duration.ofNanos(operationLatencyNano - totalServerLatencyNano.get()).toMillis()); + long applicationLatencyNano = operationLatencyNano - totalServerLatencyNano.get(); + applicationBlockingLatenciesHistogram.record(convertToMs(applicationLatencyNano), attributes); if (operationType == OperationType.ServerStreaming && spanName.getMethodName().equals("ReadRows")) { - recorder.putFirstResponseLatencies(firstResponsePerOpTimer.elapsed(TimeUnit.MILLISECONDS)); + firstResponseLatenciesHistogram.record( + convertToMs(firstResponsePerOpTimer.elapsed(TimeUnit.NANOSECONDS)), attributes); } - - recorder.recordOperation(Util.extractStatus(status), tableId, zone, cluster); } private void recordAttemptCompletion(@Nullable Throwable status) { @@ -273,8 +328,7 @@ private void recordAttemptCompletion(@Nullable Throwable status) { } } - // Make sure to reset the blocking time after recording it for the next attempt - recorder.putClientBlockingLatencies(totalClientBlockingTime.getAndSet(0)); + boolean isStreaming = operationType == OperationType.ServerStreaming; // Patch the status until it's fixed in gax. When an attempt failed, // it'll throw a ServerStreamingAttemptException. 
Unwrap the exception @@ -283,7 +337,35 @@ private void recordAttemptCompletion(@Nullable Throwable status) { status = status.getCause(); } - recorder.putAttemptLatencies(attemptTimer.elapsed(TimeUnit.MILLISECONDS)); - recorder.recordAttempt(Util.extractStatus(status), tableId, zone, cluster); + String statusStr = Util.extractStatus(status); + + Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, tableId) + .put(CLUSTER_ID_KEY, cluster) + .put(ZONE_ID_KEY, zone) + .put(METHOD_KEY, spanName.toString()) + .put(CLIENT_NAME_KEY, NAME) + .put(STREAMING_KEY, isStreaming) + .put(STATUS_KEY, statusStr) + .build(); + + clientBlockingLatenciesHistogram.record(convertToMs(totalClientBlockingTime.get()), attributes); + + attemptLatenciesHistogram.record( + convertToMs(attemptTimer.elapsed(TimeUnit.NANOSECONDS)), attributes); + + if (serverLatencies != null) { + serverLatenciesHistogram.record(serverLatencies, attributes); + connectivityErrorCounter.add(0, attributes); + } else { + connectivityErrorCounter.add(1, attributes); + } + } + + private static double convertToMs(long nanoSeconds) { + double toMs = 1e-6; + return nanoSeconds * toMs; } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java index 794997071d..f0ac656978 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java @@ -15,29 +15,112 @@ */ package com.google.cloud.bigtable.data.v2.stub.metrics; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; + import com.google.api.core.InternalApi; import com.google.api.gax.tracing.ApiTracer; import com.google.api.gax.tracing.ApiTracerFactory; import com.google.api.gax.tracing.BaseApiTracerFactory; import com.google.api.gax.tracing.SpanName; -import com.google.cloud.bigtable.stats.StatsWrapper; -import com.google.common.collect.ImmutableMap; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.Meter; +import java.io.IOException; /** - * {@link ApiTracerFactory} that will generate OpenCensus metrics by using the {@link ApiTracer} + * {@link ApiTracerFactory} that will generate 
OpenTelemetry metrics by using the {@link ApiTracer} * api. */ @InternalApi("For internal use only") public class BuiltinMetricsTracerFactory extends BaseApiTracerFactory { - private final ImmutableMap statsAttributes; + private final Attributes attributes; + + private static final String MILLISECOND = "ms"; + private static final String COUNT = "1"; - public static BuiltinMetricsTracerFactory create(ImmutableMap statsAttributes) { - return new BuiltinMetricsTracerFactory(statsAttributes); + private final DoubleHistogram operationLatenciesHistogram; + private final DoubleHistogram attemptLatenciesHistogram; + private final DoubleHistogram serverLatenciesHistogram; + private final DoubleHistogram firstResponseLatenciesHistogram; + private final DoubleHistogram clientBlockingLatenciesHistogram; + private final DoubleHistogram applicationBlockingLatenciesHistogram; + private final LongCounter connectivityErrorCounter; + private final LongCounter retryCounter; + + public static BuiltinMetricsTracerFactory create( + OpenTelemetry openTelemetry, Attributes attributes) throws IOException { + return new BuiltinMetricsTracerFactory(openTelemetry, attributes); } - private BuiltinMetricsTracerFactory(ImmutableMap statsAttributes) { - this.statsAttributes = statsAttributes; + BuiltinMetricsTracerFactory(OpenTelemetry openTelemetry, Attributes attributes) { + this.attributes = attributes; + Meter meter = openTelemetry.getMeter(METER_NAME); + + operationLatenciesHistogram = + meter + .histogramBuilder(OPERATION_LATENCIES_NAME) + .setDescription( + "Total time until final operation success or failure, including retries and backoff.") + .setUnit(MILLISECOND) + .build(); + attemptLatenciesHistogram = + meter + .histogramBuilder(ATTEMPT_LATENCIES_NAME) + .setDescription("Client observed latency per RPC attempt.") + .setUnit(MILLISECOND) + .build(); + serverLatenciesHistogram = + meter + .histogramBuilder(SERVER_LATENCIES_NAME) + .setDescription( + "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.") + .setUnit(MILLISECOND) + .build(); + firstResponseLatenciesHistogram = + meter + .histogramBuilder(FIRST_RESPONSE_LATENCIES_NAME) + .setDescription( + "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.") + .setUnit(MILLISECOND) + .build(); + clientBlockingLatenciesHistogram = + meter + .histogramBuilder(CLIENT_BLOCKING_LATENCIES_NAME) + .setDescription( + "The artificial latency introduced by the client to limit the number of outstanding requests. The publishing of the measurement will be delayed until the attempt trailers have been received.") + .setUnit(MILLISECOND) + .build(); + applicationBlockingLatenciesHistogram = + meter + .histogramBuilder(APPLICATION_BLOCKING_LATENCIES_NAME) + .setDescription( + "The latency of the client application consuming available response data.") + .setUnit(MILLISECOND) + .build(); + connectivityErrorCounter = + meter + .counterBuilder(CONNECTIVITY_ERROR_COUNT_NAME) + .setDescription( + "Number of requests that failed to reach the Google datacenter. 
(Requests without google response headers") + .setUnit(COUNT) + .build(); + retryCounter = + meter + .counterBuilder(RETRY_COUNT_NAME) + .setDescription("The number of additional RPCs sent after the initial attempt.") + .setUnit(COUNT) + .build(); } @Override @@ -45,6 +128,14 @@ public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType op return new BuiltinMetricsTracer( operationType, spanName, - StatsWrapper.createRecorder(operationType, spanName, statsAttributes)); + attributes, + operationLatenciesHistogram, + attemptLatenciesHistogram, + serverLatenciesHistogram, + firstResponseLatenciesHistogram, + clientBlockingLatenciesHistogram, + applicationBlockingLatenciesHistogram, + connectivityErrorCounter, + retryCounter); } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java new file mode 100644 index 0000000000..445160a146 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java @@ -0,0 +1,59 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import java.io.IOException; +import java.util.Map; +import javax.annotation.Nullable; + +/** + * A util class to register built-in metrics on a custom OpenTelemetry instance. This is for + * advanced usage, and is only necessary when wanting to write built-in metrics to cloud monitoring + * and custom sinks. Please refer to {@link CustomOpenTelemetryMetricsProvider} for example usage. + */ +public class BuiltinMetricsView { + + private BuiltinMetricsView() {} + + /** + * Register built-in metrics on the {@link SdkMeterProviderBuilder} with application default + * credentials. + */ + public static void registerBuiltinMetrics(String projectId, SdkMeterProviderBuilder builder) + throws IOException { + BuiltinMetricsView.registerBuiltinMetrics( + projectId, GoogleCredentials.getApplicationDefault(), builder); + } + + /** Register built-in metrics on the {@link SdkMeterProviderBuilder} with credentials. 
*/ + public static void registerBuiltinMetrics( + String projectId, @Nullable Credentials credentials, SdkMeterProviderBuilder builder) + throws IOException { + MetricExporter metricExporter = BigtableCloudMonitoringExporter.create(projectId, credentials); + for (Map.Entry entry : + BuiltinMetricsConstants.getAllViews().entrySet()) { + builder.registerView(entry.getKey(), entry.getValue()); + } + builder.registerMetricReader(PeriodicMetricReader.create(metricExporter)); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java new file mode 100644 index 0000000000..8c1c5c1c90 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java @@ -0,0 +1,70 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.common.base.MoreObjects; +import io.opentelemetry.api.OpenTelemetry; + +/** + * Set a custom OpenTelemetry instance. + * + *
<p>To register client side metrics on the custom OpenTelemetry: + * + *
<pre>{@code
+ * SdkMeterProviderBuilder sdkMeterProvider = SdkMeterProvider.builder();
+ *
+ * // register Builtin metrics on your meter provider with default credentials
+ * BuiltinMetricsView.registerBuiltinMetrics("project-id", sdkMeterProvider);
+ *
+ * // register other metrics reader and views
+ * sdkMeterProvider.registerMetricReader(..);
+ * sdkMeterProvider.registerView(..);
+ *
+ * // create the OTEL instance
+ * OpenTelemetry openTelemetry = OpenTelemetrySdk
+ *     .builder()
+ *     .setMeterProvider(sdkMeterProvider.build())
+ *     .build();
+ *
+ * // Override MetricsProvider in BigtableDataSettings
+ * BigtableDataSettings settings = BigtableDataSettings.newBuilder()
+ *   .setProjectId("my-project")
+ *   .setInstanceId("my-instance-id")
+ *   .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry))
+ *   .build();
+ * }</pre>
+ */ +public final class CustomOpenTelemetryMetricsProvider implements MetricsProvider { + + private final OpenTelemetry otel; + + public static CustomOpenTelemetryMetricsProvider create(OpenTelemetry otel) { + return new CustomOpenTelemetryMetricsProvider(otel); + } + + private CustomOpenTelemetryMetricsProvider(OpenTelemetry otel) { + this.otel = otel; + } + + public OpenTelemetry getOpenTelemetry() { + return otel; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("openTelemetry", otel).toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java new file mode 100644 index 0000000000..b8aad8c931 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java @@ -0,0 +1,63 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.api.core.InternalApi; +import com.google.auth.Credentials; +import com.google.common.base.MoreObjects; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import java.io.IOException; +import javax.annotation.Nullable; + +/** + * Set {@link + * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}, + * to {@link this#INSTANCE} to enable collecting and export client side metrics + * https://cloud.google.com/bigtable/docs/client-side-metrics. This is the default setting in {@link + * com.google.cloud.bigtable.data.v2.BigtableDataSettings}. 
+ */ +public final class DefaultMetricsProvider implements MetricsProvider { + + public static DefaultMetricsProvider INSTANCE = new DefaultMetricsProvider(); + + private OpenTelemetry openTelemetry; + private String projectId; + + private DefaultMetricsProvider() {} + + @InternalApi + public OpenTelemetry getOpenTelemetry(String projectId, @Nullable Credentials credentials) + throws IOException { + this.projectId = projectId; + if (openTelemetry == null) { + SdkMeterProviderBuilder meterProvider = SdkMeterProvider.builder(); + BuiltinMetricsView.registerBuiltinMetrics(projectId, credentials, meterProvider); + openTelemetry = OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + } + return openTelemetry; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("projectId", projectId) + .add("openTelemetry", openTelemetry) + .toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java index cab3b0bbd0..a891df9509 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java @@ -15,12 +15,15 @@ */ package com.google.cloud.bigtable.data.v2.stub.metrics; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME; + import com.google.api.core.InternalApi; -import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection; -import com.google.cloud.bigtable.stats.StatsWrapper; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableMap; import io.grpc.ClientInterceptor; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.Meter; import java.util.Collections; import java.util.Set; import java.util.WeakHashMap; @@ -30,24 +33,30 @@ /* Background task that goes through all connections and updates the errors_per_connection metric. */ @InternalApi("For internal use only") public class ErrorCountPerConnectionMetricTracker implements Runnable { + private static final Integer PER_CONNECTION_ERROR_COUNT_PERIOD_SECONDS = 60; + + private final LongHistogram perConnectionErrorCountHistogram; + private final Attributes attributes; + private final Set connectionErrorCountInterceptors; private final Object interceptorsLock = new Object(); - // This is not final so that it can be updated and mocked during testing. 
- private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection; - @VisibleForTesting - void setStatsRecorderWrapperForConnection( - StatsRecorderWrapperForConnection statsRecorderWrapperForConnection) { - this.statsRecorderWrapperForConnection = statsRecorderWrapperForConnection; - } - - public ErrorCountPerConnectionMetricTracker(ImmutableMap builtinAttributes) { + public ErrorCountPerConnectionMetricTracker(OpenTelemetry openTelemetry, Attributes attributes) { connectionErrorCountInterceptors = Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap<>())); - this.statsRecorderWrapperForConnection = - StatsWrapper.createRecorderForConnection(builtinAttributes); + Meter meter = openTelemetry.getMeter(METER_NAME); + + perConnectionErrorCountHistogram = + meter + .histogramBuilder(PER_CONNECTION_ERROR_COUNT_NAME) + .ofLongs() + .setDescription("Distribution of counts of channels per 'error count per minute'.") + .setUnit("1") + .build(); + + this.attributes = attributes; } public void startConnectionErrorCountTracker(ScheduledExecutorService scheduler) { @@ -75,7 +84,7 @@ public void run() { if (errors > 0 || successes > 0) { // TODO: add a metric to also keep track of the number of successful requests per each // connection. - statsRecorderWrapperForConnection.putAndRecordPerConnectionErrorCount(errors); + perConnectionErrorCountHistogram.record(errors, attributes); } } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java new file mode 100644 index 0000000000..251bb41619 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java @@ -0,0 +1,25 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.api.core.InternalExtensionOnly; + +/** + * Provide client side metrics https://cloud.google.com/bigtable/docs/client-side-metrics + * implementations. + */ +@InternalExtensionOnly +public interface MetricsProvider {} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java new file mode 100644 index 0000000000..9a00ddb135 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java @@ -0,0 +1,36 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.common.base.MoreObjects; + +/** + * Set {@link + * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}, + * to {@link this#INSTANCE} to disable collecting and export client side metrics + * https://cloud.google.com/bigtable/docs/client-side-metrics. + */ +public final class NoopMetricsProvider implements MetricsProvider { + + public static NoopMetricsProvider INSTANCE = new NoopMetricsProvider(); + + private NoopMetricsProvider() {} + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java index b7140f0156..ce73d75dc1 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java @@ -21,6 +21,7 @@ import com.google.api.gax.rpc.ApiCallContext; import com.google.api.gax.rpc.UnaryCallable; import com.google.api.gax.tracing.ApiTracer; +import org.threeten.bp.Duration; /** * This callable will extract total throttled time from {@link ApiCallContext} and add it to {@link @@ -42,7 +43,8 @@ public ApiFuture futureCall(RequestT request, ApiCallContext context) // this should always be true if (tracer instanceof BigtableTracer) { ((BigtableTracer) tracer) - .batchRequestThrottled(context.getOption(Batcher.THROTTLED_TIME_KEY)); + .batchRequestThrottled( + Duration.ofMillis(context.getOption(Batcher.THROTTLED_TIME_KEY)).toNanos()); } } return innerCallable.futureCall(request, context); diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/it/BigtableInstanceAdminClientIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/it/BigtableInstanceAdminClientIT.java index d8b9410cae..76413165bd 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/it/BigtableInstanceAdminClientIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/it/BigtableInstanceAdminClientIT.java @@ -242,6 +242,43 @@ public void appProfileTestPriority() { } } + @Test + public void appProfileTestDataBoost() { + String newInstanceId = prefixGenerator.newPrefix(); + String newClusterId = newInstanceId + "-c1"; + + client.createInstance( + CreateInstanceRequest.of(newInstanceId) + .addCluster(newClusterId, testEnvRule.env().getPrimaryZone(), 1, StorageType.SSD) + .setDisplayName("Priority-Instance-Test") + .addLabel("state", "readytodelete") + .setType(Type.PRODUCTION)); + + try { + assertThat(client.exists(newInstanceId)).isTrue(); + + String testAppProfile = prefixGenerator.newPrefix(); + + CreateAppProfileRequest request = + CreateAppProfileRequest.of(newInstanceId, testAppProfile) + 
.setRoutingPolicy(AppProfile.SingleClusterRoutingPolicy.of(newClusterId)) + .setIsolationPolicy( + AppProfile.DataBoostIsolationReadOnlyPolicy.of( + AppProfile.ComputeBillingOwner.HOST_PAYS)) + .setDescription("databoost app profile"); + + AppProfile newlyCreateAppProfile = client.createAppProfile(request); + AppProfile.ComputeBillingOwner computeBillingOwner = + ((AppProfile.DataBoostIsolationReadOnlyPolicy) newlyCreateAppProfile.getIsolationPolicy()) + .getComputeBillingOwner(); + assertThat(computeBillingOwner).isEqualTo(AppProfile.ComputeBillingOwner.HOST_PAYS); + } finally { + if (client.exists(newInstanceId)) { + client.deleteInstance(newInstanceId); + } + } + } + @Test public void iamUpdateTest() { Policy policy = client.getIamPolicy(instanceId); diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/AppProfileTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/AppProfileTest.java index 35711cefdb..8215e5f8fc 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/AppProfileTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/AppProfileTest.java @@ -234,4 +234,61 @@ public void testHashCode() { assertThat(updateAppProfileRequest.hashCode()) .isNotEqualTo(updateAppProfileRequest3.hashCode()); } + + @Test + public void testFromProtoWithDataBoostIsolation() { + AppProfile producer = + AppProfile.fromProto( + com.google.bigtable.admin.v2.AppProfile.newBuilder() + .setName(AppProfileName.of("my-project", "my-instance", "my-profile").toString()) + .setDescription("my description") + .setSingleClusterRouting( + SingleClusterRouting.newBuilder() + .setClusterId("my-cluster") + .setAllowTransactionalWrites(true) + .build()) + .setDataBoostIsolationReadOnly( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.newBuilder() + .setComputeBillingOwner( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .ComputeBillingOwner.HOST_PAYS)) + .setEtag("my-etag") + .build()); + + assertThat(producer.getInstanceId()).isEqualTo("my-instance"); + assertThat(producer.getId()).isEqualTo("my-profile"); + assertThat(producer.getDescription()).isEqualTo("my description"); + assertThat(producer.getPolicy()).isEqualTo(SingleClusterRoutingPolicy.of("my-cluster", true)); + assertThat(producer.getIsolationPolicy()) + .isEqualTo( + AppProfile.DataBoostIsolationReadOnlyPolicy.of( + AppProfile.ComputeBillingOwner.HOST_PAYS)); + + AppProfile consumer = + AppProfile.fromProto( + com.google.bigtable.admin.v2.AppProfile.newBuilder() + .setName(AppProfileName.of("my-project", "my-instance", "my-profile").toString()) + .setDescription("my description") + .setSingleClusterRouting( + SingleClusterRouting.newBuilder() + .setClusterId("my-cluster") + .setAllowTransactionalWrites(true) + .build()) + .setDataBoostIsolationReadOnly( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.newBuilder() + .setComputeBillingOwner( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .ComputeBillingOwner.COMPUTE_BILLING_OWNER_UNSPECIFIED)) + .setEtag("my-etag") + .build()); + + assertThat(consumer.getInstanceId()).isEqualTo("my-instance"); + assertThat(consumer.getId()).isEqualTo("my-profile"); + assertThat(consumer.getDescription()).isEqualTo("my description"); + assertThat(consumer.getPolicy()).isEqualTo(SingleClusterRoutingPolicy.of("my-cluster", true)); + assertThat(consumer.getIsolationPolicy()) + .isEqualTo( + 
AppProfile.DataBoostIsolationReadOnlyPolicy.of( + AppProfile.ComputeBillingOwner.UNSPECIFIED)); + } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequestTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequestTest.java index 4e5812f774..088dc2bcfe 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequestTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequestTest.java @@ -17,6 +17,7 @@ import static com.google.common.truth.Truth.assertThat; +import com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly; import com.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny; import com.google.bigtable.admin.v2.AppProfile.SingleClusterRouting; import com.google.bigtable.admin.v2.AppProfile.StandardIsolation; @@ -84,4 +85,20 @@ public void testStandardIsolation() { assertThat(wrapper.toProto("my-project").getAppProfile().getStandardIsolation()) .isEqualTo(StandardIsolation.getDefaultInstance()); } + + @Test + public void testDataBoostIsolationReadOnly() { + CreateAppProfileRequest wrapper = + CreateAppProfileRequest.of("my-instance", "my-profile") + .setRoutingPolicy(MultiClusterRoutingPolicy.of()) + .setIsolationPolicy( + AppProfile.DataBoostIsolationReadOnlyPolicy.of( + AppProfile.ComputeBillingOwner.HOST_PAYS)); + + assertThat(wrapper.toProto("my-project").getAppProfile().getDataBoostIsolationReadOnly()) + .isEqualTo( + DataBoostIsolationReadOnly.newBuilder() + .setComputeBillingOwner(DataBoostIsolationReadOnly.ComputeBillingOwner.HOST_PAYS) + .build()); + } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequestTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequestTest.java index 13e98f14c1..04cf3f0813 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequestTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequestTest.java @@ -17,6 +17,7 @@ import static com.google.common.truth.Truth.assertThat; +import com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly; import com.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny; import com.google.bigtable.admin.v2.AppProfile.SingleClusterRouting; import com.google.bigtable.admin.v2.AppProfile.StandardIsolation; @@ -111,4 +112,38 @@ public void testUpdateExistingStandardIsolation() { .setUpdateMask(FieldMask.newBuilder().addPaths("standard_isolation")) .build()); } + + @Test + public void testUpdateExistingDataBoostIsolationReadOnly() { + com.google.bigtable.admin.v2.AppProfile existingProto = + com.google.bigtable.admin.v2.AppProfile.newBuilder() + .setName("projects/my-project/instances/my-instance/appProfiles/my-profile") + .setEtag("my-etag") + .setDescription("description") + .setMultiClusterRoutingUseAny(MultiClusterRoutingUseAny.getDefaultInstance()) + .setStandardIsolation(StandardIsolation.getDefaultInstance()) + .build(); + + AppProfile existingWrapper = AppProfile.fromProto(existingProto); + + UpdateAppProfileRequest updateWrapper = + UpdateAppProfileRequest.of(existingWrapper) + .setIsolationPolicy( + AppProfile.DataBoostIsolationReadOnlyPolicy.of( + AppProfile.ComputeBillingOwner.HOST_PAYS)); + + 
assertThat(updateWrapper.toProto("my-project")) + .isEqualTo( + com.google.bigtable.admin.v2.UpdateAppProfileRequest.newBuilder() + .setAppProfile( + existingProto + .toBuilder() + .setDataBoostIsolationReadOnly( + DataBoostIsolationReadOnly.newBuilder() + .setComputeBillingOwner( + DataBoostIsolationReadOnly.ComputeBillingOwner.HOST_PAYS) + .build())) + .setUpdateMask(FieldMask.newBuilder().addPaths("data_boost_isolation_read_only")) + .build()); + } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java index a35112b380..fea66e82bf 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java @@ -36,6 +36,7 @@ import com.google.bigtable.v2.ReadRowsResponse; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider; import com.google.common.base.Preconditions; import com.google.common.io.BaseEncoding; import io.grpc.Attributes; @@ -169,10 +170,13 @@ public void tearDown() { @Test public void testNewClientsShareTransportChannel() throws Exception { - // Create 3 lightweight clients - - try (BigtableDataClientFactory factory = BigtableDataClientFactory.create(defaultSettings); + try (BigtableDataClientFactory factory = + BigtableDataClientFactory.create( + defaultSettings + .toBuilder() + .setMetricsProvider(NoopMetricsProvider.INSTANCE) + .build()); BigtableDataClient ignored1 = factory.createForInstance("project1", "instance1"); BigtableDataClient ignored2 = factory.createForInstance("project2", "instance2"); BigtableDataClient ignored3 = factory.createForInstance("project3", "instance3")) { @@ -316,7 +320,7 @@ public void testFeatureFlags() throws Exception { @Test public void testBulkMutationFlowControllerConfigured() throws Exception { BigtableDataSettings settings = - BigtableDataSettings.newBuilder() + BigtableDataSettings.newBuilderForEmulator(server.getPort()) .setProjectId("my-project") .setInstanceId("my-instance") .setCredentialsProvider(credentialsProvider) diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java index 4e75fb8631..56181a20ab 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java @@ -15,34 +15,64 @@ */ package com.google.cloud.bigtable.data.v2.it; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getStartTimeSeconds; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes; +import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.Truth.assertWithMessage; import static com.google.common.truth.TruthJUnit.assume; import com.google.api.client.util.Lists; +import 
com.google.cloud.bigtable.admin.v2.BigtableInstanceAdminClient; import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient; +import com.google.cloud.bigtable.admin.v2.models.AppProfile; +import com.google.cloud.bigtable.admin.v2.models.CreateAppProfileRequest; import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest; import com.google.cloud.bigtable.admin.v2.models.Table; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv; import com.google.cloud.bigtable.test_helpers.env.PrefixGenerator; import com.google.cloud.bigtable.test_helpers.env.TestEnvRule; import com.google.cloud.monitoring.v3.MetricServiceClient; import com.google.common.base.Stopwatch; +import com.google.common.collect.BoundType; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Range; import com.google.monitoring.v3.ListTimeSeriesRequest; import com.google.monitoring.v3.ListTimeSeriesResponse; +import com.google.monitoring.v3.Point; import com.google.monitoring.v3.ProjectName; import com.google.monitoring.v3.TimeInterval; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Timestamp; import com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.logging.Level; import java.util.logging.Logger; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -50,6 +80,7 @@ import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.threeten.bp.Duration; +import org.threeten.bp.Instant; @RunWith(JUnit4.class) public class BuiltinMetricsIT { @@ -58,71 +89,131 @@ public class BuiltinMetricsIT { private static final Logger logger = Logger.getLogger(BuiltinMetricsIT.class.getName()); @Rule public Timeout globalTimeout = Timeout.seconds(900); - private static Table table; - private static BigtableTableAdminClient tableAdminClient; - private static MetricServiceClient metricClient; + + private Table tableCustomOtel; + private Table tableDefault; + private BigtableDataClient clientCustomOtel; + private BigtableDataClient clientDefault; + private BigtableTableAdminClient tableAdminClient; + private BigtableInstanceAdminClient instanceAdminClient; + private MetricServiceClient metricClient; + + private InMemoryMetricReader metricReader; + private String 
appProfileCustomOtel; + private String appProfileDefault; public static String[] VIEWS = { "operation_latencies", "attempt_latencies", "connectivity_error_count", - "application_blocking_latencies" + "application_blocking_latencies", }; - @BeforeClass - public static void setUpClass() throws IOException { + @Before + public void setup() throws IOException { + // This test tests 2 things. End-to-end test using the default OTEL instance created by the + // client, and also end-to-end test using a custom OTEL instance set by the customer. In + // both tests, a BigtableCloudMonitoringExporter is created to export data to Cloud Monitoring. assume() .withMessage("Builtin metrics integration test is not supported by emulator") .that(testEnvRule.env()) .isNotInstanceOf(EmulatorEnv.class); - // Enable built in metrics - BigtableDataSettings.enableBuiltinMetrics(); - // Create a cloud monitoring client metricClient = MetricServiceClient.create(); tableAdminClient = testEnvRule.env().getTableAdminClient(); + instanceAdminClient = testEnvRule.env().getInstanceAdminClient(); + appProfileCustomOtel = PrefixGenerator.newPrefix("test1"); + appProfileDefault = PrefixGenerator.newPrefix("test2"); + instanceAdminClient.createAppProfile( + CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileCustomOtel) + .setRoutingPolicy( + AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId())) + .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW))); + instanceAdminClient.createAppProfile( + CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileDefault) + .setRoutingPolicy( + AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId())) + .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW))); + + // When using the custom OTEL instance, we can also register a InMemoryMetricReader on the + // SdkMeterProvider to verify the data exported on Cloud Monitoring with the in memory metric + // data collected in InMemoryMetricReader. 
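The comment above summarizes the pattern the migrated tests rely on: built-in metrics get registered on a caller-supplied `SdkMeterProvider`, and that provider is handed to the client through `CustomOpenTelemetryMetricsProvider`. Pulled out of the test scaffolding, the wiring looks roughly like the sketch below; the project/instance IDs are placeholders, and `InMemoryMetricReader` stands in for whatever reader or exporter an application would normally register.

```java
import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.sdk.OpenTelemetrySdk;
import io.opentelemetry.sdk.metrics.SdkMeterProvider;
import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;

public class CustomOtelWiringSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative reader; an application would typically register its own exporter here.
    InMemoryMetricReader reader = InMemoryMetricReader.create();

    // Attach the Bigtable built-in metric views to the caller-owned meter provider.
    SdkMeterProviderBuilder meterProvider =
        SdkMeterProvider.builder().registerMetricReader(reader);
    BuiltinMetricsView.registerBuiltinMetrics("my-project", meterProvider);
    OpenTelemetry openTelemetry =
        OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();

    // Hand the custom OpenTelemetry instance to the data client instead of the default one.
    BigtableDataSettings settings =
        BigtableDataSettings.newBuilder()
            .setProjectId("my-project")
            .setInstanceId("my-instance")
            .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry))
            .build();

    try (BigtableDataClient client = BigtableDataClient.create(settings)) {
      // Use the client; recorded points become visible via reader.collectAllMetrics(),
      // which is exactly how the tests below compare exported data against in-memory data.
    }
  }
}
```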
+ metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + + BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder(); + + clientCustomOtel = + BigtableDataClient.create( + settings + .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)) + .setAppProfileId(appProfileCustomOtel) + .build()); + clientDefault = BigtableDataClient.create(settings.setAppProfileId(appProfileDefault).build()); } - @AfterClass - public static void tearDown() { + @After + public void tearDown() { if (metricClient != null) { metricClient.close(); } - if (table != null) { - tableAdminClient.deleteTable(table.getId()); + if (tableCustomOtel != null) { + tableAdminClient.deleteTable(tableCustomOtel.getId()); + } + if (tableDefault != null) { + tableAdminClient.deleteTable(tableDefault.getId()); + } + if (instanceAdminClient != null) { + instanceAdminClient.deleteAppProfile( + testEnvRule.env().getInstanceId(), appProfileCustomOtel, true); + instanceAdminClient.deleteAppProfile( + testEnvRule.env().getInstanceId(), appProfileDefault, true); + } + if (clientCustomOtel != null) { + clientCustomOtel.close(); + } + if (clientDefault != null) { + clientDefault.close(); } } @Test - public void testBuiltinMetrics() throws Exception { - logger.info("Started testing builtin metrics"); - table = + public void testBuiltinMetricsWithDefaultOTEL() throws Exception { + logger.info("Started testing builtin metrics with default OTEL"); + tableDefault = tableAdminClient.createTable( - CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test")) + CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test1")) .addFamily("cf")); - logger.info("Create table: " + table.getId()); - // Send a MutateRow and ReadRows request - testEnvRule - .env() - .getDataClient() - .mutateRow(RowMutation.create(table.getId(), "a-new-key").setCell("cf", "q", "abc")); + logger.info("Create default table: " + tableDefault.getId()); + + Instant start = Instant.now().minus(Duration.ofSeconds(10)); + + // Send a MutateRow and ReadRows request and measure the latencies for these requests. + clientDefault.mutateRow( + RowMutation.create(tableDefault.getId(), "a-new-key").setCell("cf", "q", "abc")); ArrayList rows = - Lists.newArrayList( - testEnvRule.env().getDataClient().readRows(Query.create(table.getId()).limit(10))); + Lists.newArrayList(clientDefault.readRows(Query.create(tableDefault.getId()).limit(10))); - Stopwatch stopwatch = Stopwatch.createStarted(); + // This stopwatch is used for to limit fetching of metric data in verifyMetrics + Stopwatch metricsPollingStopwatch = Stopwatch.createStarted(); ProjectName name = ProjectName.of(testEnvRule.env().getProjectId()); - // Restrict time to last 10 minutes and 5 minutes after the request - long startMillis = System.currentTimeMillis() - Duration.ofMinutes(10).toMillis(); - long endMillis = startMillis + Duration.ofMinutes(15).toMillis(); + // Interval is set in the monarch request when query metric timestamps. + // Restrict it to before we send to request and 3 minute after we send the request. If + // it turns out to be still flaky we can increase the filter range. 
+ Instant end = Instant.now().plus(Duration.ofMinutes(3)); TimeInterval interval = TimeInterval.newBuilder() - .setStartTime(Timestamps.fromMillis(startMillis)) - .setEndTime(Timestamps.fromMillis(endMillis)) + .setStartTime(Timestamps.fromMillis(start.toEpochMilli())) + .setEndTime(Timestamps.fromMillis(end.toEpochMilli())) .build(); for (String view : VIEWS) { @@ -132,42 +223,123 @@ public void testBuiltinMetrics() throws Exception { String.format( "metric.type=\"bigtable.googleapis.com/client/%s\" " + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\"" - + " AND resource.labels.table=\"%s\"", - view, testEnvRule.env().getInstanceId(), table.getId()); + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault); ListTimeSeriesRequest.Builder requestBuilder = ListTimeSeriesRequest.newBuilder() .setName(name.toString()) .setFilter(metricFilter) .setInterval(interval) .setView(ListTimeSeriesRequest.TimeSeriesView.FULL); - - verifyMetricsArePublished(requestBuilder.build(), stopwatch, view); + verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); // Verify that metrics are published for ReadRows request metricFilter = String.format( "metric.type=\"bigtable.googleapis.com/client/%s\" " + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\"" - + " AND resource.labels.table=\"%s\"", - view, testEnvRule.env().getInstanceId(), table.getId()); + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault); + requestBuilder.setFilter(metricFilter); + + verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); + } + } + + @Test + public void testBuiltinMetricsWithCustomOTEL() throws Exception { + logger.info("Started testing builtin metrics with custom OTEL"); + tableCustomOtel = + tableAdminClient.createTable( + CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test2")) + .addFamily("cf")); + logger.info("Create custom table: " + tableCustomOtel.getId()); + + Instant start = Instant.now().minus(Duration.ofSeconds(10)); + // Send a MutateRow and ReadRows request and measure the latencies for these requests. + clientCustomOtel.mutateRow( + RowMutation.create(tableCustomOtel.getId(), "a-new-key").setCell("cf", "q", "abc")); + ArrayList rows = + Lists.newArrayList( + clientCustomOtel.readRows(Query.create(tableCustomOtel.getId()).limit(10))); + + // This stopwatch is used for to limit fetching of metric data in verifyMetrics + Stopwatch metricsPollingStopwatch = Stopwatch.createStarted(); + + ProjectName name = ProjectName.of(testEnvRule.env().getProjectId()); + + Collection fromMetricReader = metricReader.collectAllMetrics(); + + // Interval is set in the monarch request when query metric timestamps. + // Restrict it to before we send to request and 3 minute after we send the request. If + // it turns out to be still flaky we can increase the filter range. 
+ Instant end = start.plus(Duration.ofMinutes(3)); + TimeInterval interval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromMillis(start.toEpochMilli())) + .setEndTime(Timestamps.fromMillis(end.toEpochMilli())) + .build(); + + for (String view : VIEWS) { + String otelMetricName = view; + if (view.equals("application_blocking_latencies")) { + otelMetricName = "application_latencies"; + } + MetricData dataFromReader = getMetricData(fromMetricReader, otelMetricName); + + // Filter on instance and method name + // Verify that metrics are correct for MutateRows request + String metricFilter = + String.format( + "metric.type=\"bigtable.googleapis.com/client/%s\" " + + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\"" + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, + testEnvRule.env().getInstanceId(), + tableCustomOtel.getId(), + appProfileCustomOtel); + ListTimeSeriesRequest.Builder requestBuilder = + ListTimeSeriesRequest.newBuilder() + .setName(name.toString()) + .setFilter(metricFilter) + .setInterval(interval) + .setView(ListTimeSeriesRequest.TimeSeriesView.FULL); + + ListTimeSeriesResponse response = + verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); + verifyMetricsWithMetricsReader(response, dataFromReader); + + // Verify that metrics are correct for ReadRows request + metricFilter = + String.format( + "metric.type=\"bigtable.googleapis.com/client/%s\" " + + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\"" + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, + testEnvRule.env().getInstanceId(), + tableCustomOtel.getId(), + appProfileCustomOtel); requestBuilder.setFilter(metricFilter); - verifyMetricsArePublished(requestBuilder.build(), stopwatch, view); + response = verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); + verifyMetricsWithMetricsReader(response, dataFromReader); } } - private void verifyMetricsArePublished( - ListTimeSeriesRequest request, Stopwatch stopwatch, String view) throws Exception { + private ListTimeSeriesResponse verifyMetricsArePublished( + ListTimeSeriesRequest request, Stopwatch metricsPollingStopwatch, String view) + throws Exception { ListTimeSeriesResponse response = metricClient.listTimeSeriesCallable().call(request); - logger.log( - Level.INFO, - "Checking for view " - + view - + ", has timeseries=" - + response.getTimeSeriesCount() - + " stopwatch elapsed " - + stopwatch.elapsed(TimeUnit.MINUTES)); - while (response.getTimeSeriesCount() == 0 && stopwatch.elapsed(TimeUnit.MINUTES) < 10) { + while (response.getTimeSeriesCount() == 0 + && metricsPollingStopwatch.elapsed(TimeUnit.MINUTES) < 10) { + logger.log( + Level.INFO, + "Checking for view " + + view + + ", has timeseries=" + + response.getTimeSeriesCount() + + " stopwatch elapsed " + + metricsPollingStopwatch.elapsed(TimeUnit.MINUTES)); // Call listTimeSeries every minute Thread.sleep(Duration.ofMinutes(1).toMillis()); response = metricClient.listTimeSeriesCallable().call(request); @@ -176,5 +348,64 @@ private void verifyMetricsArePublished( assertWithMessage("View " + view + " didn't return any data.") .that(response.getTimeSeriesCount()) .isGreaterThan(0); + + return response; + } + + private void verifyMetricsWithMetricsReader( + ListTimeSeriesResponse response, MetricData dataFromReader) { + for (TimeSeries ts : response.getTimeSeriesList()) { + Map attributesMap = + 
ImmutableMap.builder() + .putAll(ts.getResource().getLabelsMap()) + .putAll(ts.getMetric().getLabelsMap()) + .build(); + AttributesBuilder attributesBuilder = Attributes.builder(); + String streamingKey = BuiltinMetricsConstants.STREAMING_KEY.getKey(); + attributesMap.forEach( + (k, v) -> { + if (!k.equals(streamingKey)) { + attributesBuilder.put(k, v); + } + }); + if (attributesMap.containsKey(streamingKey)) { + attributesBuilder.put(streamingKey, Boolean.parseBoolean(attributesMap.get(streamingKey))); + } + Attributes attributes = attributesBuilder.build(); + verifyAttributes(dataFromReader, attributes); + long expectedValue = getAggregatedValue(dataFromReader, attributes); + Timestamp startTime = getStartTimeSeconds(dataFromReader, attributes); + assertThat(startTime.getSeconds()).isGreaterThan(0); + List point = + ts.getPointsList().stream() + .filter( + p -> + Timestamps.compare(p.getInterval().getStartTime(), startTime) >= 0 + && Timestamps.compare( + p.getInterval().getStartTime(), + Timestamps.add( + startTime, + com.google.protobuf.Duration.newBuilder() + .setSeconds(60) + .build())) + < 0) + .collect(Collectors.toList()); + if (point.size() > 0) { + long actualValue = (long) point.get(0).getValue().getDistributionValue().getMean(); + assertWithMessage( + "actual value does not match expected value, actual value " + + actualValue + + " expected value " + + expectedValue + + " actual start time " + + point.get(0).getInterval().getStartTime() + + " expected start time " + + startTime) + .that(actualValue) + .isIn( + Range.range( + expectedValue - 1, BoundType.CLOSED, expectedValue + 1, BoundType.CLOSED)); + } + } } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java new file mode 100644 index 0000000000..56f6bfa476 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.it; + +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.common.truth.Correspondence; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.PointData; + +public class MetricsITUtils { + + static final Correspondence METRIC_DATA_NAME_CONTAINS = + Correspondence.from((md, s) -> md.getName().contains(s), "contains name"); + + static final Correspondence POINT_DATA_CLUSTER_ID_CONTAINS = + Correspondence.from( + (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY).contains(s), + "contains attributes"); + + static final Correspondence POINT_DATA_ZONE_ID_CONTAINS = + Correspondence.from( + (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY).contains(s), + "contains attributes"); +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java index b0e12d5ade..84ab24f1c8 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java @@ -15,37 +15,76 @@ */ package com.google.cloud.bigtable.data.v2.it; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS; import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.TruthJUnit.assume; import com.google.api.core.ApiFuture; import com.google.api.gax.rpc.NotFoundException; import com.google.cloud.bigtable.admin.v2.models.Cluster; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; -import com.google.cloud.bigtable.stats.BuiltinViews; -import com.google.cloud.bigtable.stats.StatsWrapper; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv; import com.google.cloud.bigtable.test_helpers.env.TestEnvRule; import com.google.common.collect.Lists; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; public class StreamingMetricsMetadataIT { @ClassRule public static TestEnvRule testEnvRule = new TestEnvRule(); - 
@BeforeClass - public static void setUpClass() { + private BigtableDataClient client; + private InMemoryMetricReader metricReader; + + @Before + public void setup() throws IOException { assume() .withMessage("StreamingMetricsMetadataIT is not supported on Emulator") .that(testEnvRule.env()) .isNotInstanceOf(EmulatorEnv.class); - BuiltinViews.registerBigtableBuiltinViews(); + + BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder(); + + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + + settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)); + client = BigtableDataClient.create(settings.build()); + } + + @After + public void tearDown() throws IOException { + if (client != null) { + client.close(); + } } @Test @@ -54,7 +93,7 @@ public void testSuccess() throws Exception { String uniqueKey = prefix + "-read"; Query query = Query.create(testEnvRule.env().getTableId()).rowKey(uniqueKey); - ArrayList rows = Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query)); + ArrayList rows = Lists.newArrayList(client.readRows(query)); ApiFuture> clustersFuture = testEnvRule @@ -64,27 +103,73 @@ public void testSuccess() throws Exception { List clusters = clustersFuture.get(1, TimeUnit.MINUTES); - // give opencensus some time to populate view data - Thread.sleep(100); + Collection allMetricData = metricReader.collectAllMetrics(); + List metrics = + metricReader.collectAllMetrics().stream() + .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME)) + .collect(Collectors.toList()); + + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metrics).hasSize(1); - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains(clusters.get(0).getZone()); - assertThat(tagValueStrings).contains(clusters.get(0).getId()); + MetricData metricData = metrics.get(0); + List pointData = new ArrayList<>(metricData.getData().getPoints()); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); + + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains(clusters.get(0).getId()); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS) + .contains(clusters.get(0).getZone()); + assertThat(clusterAttributes).contains(clusters.get(0).getId()); + assertThat(zoneAttributes).contains(clusters.get(0).getZone()); } @Test - public void testFailure() throws InterruptedException { + public void testFailure() { Query query = Query.create("non-exist-table"); try { - Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query)); + Lists.newArrayList(client.readRows(query)); } catch (NotFoundException e) { } - // give opencensus some time to populate view data - Thread.sleep(100); + Collection allMetricData = metricReader.collectAllMetrics(); + List metrics 
= + metricReader.collectAllMetrics().stream() + .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME)) + .collect(Collectors.toList()); + + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metrics).hasSize(1); + + MetricData metricData = metrics.get(0); + List pointData = new ArrayList<>(metricData.getData().getPoints()); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains("unspecified"); - assertThat(tagValueStrings).contains("global"); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains("unspecified"); + assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global"); + assertThat(clusterAttributes).contains("unspecified"); + assertThat(zoneAttributes).contains("global"); } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java index aa2a4317fc..42adb8ea6e 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java @@ -15,35 +15,76 @@ */ package com.google.cloud.bigtable.data.v2.it; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS; import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.TruthJUnit.assume; import com.google.api.core.ApiFuture; import com.google.api.gax.rpc.NotFoundException; import com.google.cloud.bigtable.admin.v2.models.Cluster; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.RowMutation; -import com.google.cloud.bigtable.stats.BuiltinViews; -import com.google.cloud.bigtable.stats.StatsWrapper; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv; import com.google.cloud.bigtable.test_helpers.env.TestEnvRule; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.UUID; import 
java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; public class UnaryMetricsMetadataIT { @ClassRule public static TestEnvRule testEnvRule = new TestEnvRule(); - @BeforeClass - public static void setUpClass() { + private BigtableDataClient client; + private InMemoryMetricReader metricReader; + + @Before + public void setup() throws IOException { assume() .withMessage("UnaryMetricsMetadataIT is not supported on Emulator") .that(testEnvRule.env()) .isNotInstanceOf(EmulatorEnv.class); - BuiltinViews.registerBigtableBuiltinViews(); + + BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder(); + + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + + settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)); + + client = BigtableDataClient.create(settings.build()); + } + + @After + public void tearDown() throws IOException { + if (client != null) { + client.close(); + } } @Test @@ -52,9 +93,7 @@ public void testSuccess() throws Exception { String familyId = testEnvRule.env().getFamilyId(); ApiFuture future = - testEnvRule - .env() - .getDataClient() + client .mutateRowCallable() .futureCall( RowMutation.create(testEnvRule.env().getTableId(), rowKey) @@ -69,18 +108,36 @@ public void testSuccess() throws Exception { .listClustersAsync(testEnvRule.env().getInstanceId()); List clusters = clustersFuture.get(1, TimeUnit.MINUTES); - // give opencensus some time to populate view data - for (int i = 0; i < 10; i++) { - if (StatsWrapper.getOperationLatencyViewTagValueStrings() - .contains(clusters.get(0).getZone())) { - break; - } - Thread.sleep(100); - } + Collection allMetricData = metricReader.collectAllMetrics(); + List metrics = + allMetricData.stream() + .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME)) + .collect(Collectors.toList()); + + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metrics).hasSize(1); + + MetricData metricData = metrics.get(0); + List pointData = new ArrayList<>(metricData.getData().getPoints()); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains(clusters.get(0).getZone()); - assertThat(tagValueStrings).contains(clusters.get(0).getId()); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains(clusters.get(0).getId()); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS) + .contains(clusters.get(0).getZone()); + assertThat(clusterAttributes).contains(clusters.get(0).getId()); + assertThat(zoneAttributes).contains(clusters.get(0).getZone()); } 
@Test @@ -89,9 +146,7 @@ public void testFailure() throws Exception { String familyId = testEnvRule.env().getFamilyId(); ApiFuture future = - testEnvRule - .env() - .getDataClient() + client .mutateRowCallable() .futureCall( RowMutation.create("non-exist-table", rowKey).setCell(familyId, "q", "myVal")); @@ -106,16 +161,39 @@ public void testFailure() throws Exception { } } - // give opencensus some time to populate view data - for (int i = 0; i < 10; i++) { - if (StatsWrapper.getOperationLatencyViewTagValueStrings().contains("unspecified")) { + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = null; + for (MetricData md : allMetricData) { + if (md.getName() + .equals( + BuiltinMetricsConstants.METER_NAME + + BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME)) { + metricData = md; break; } - Thread.sleep(100); } - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains("unspecified"); - assertThat(tagValueStrings).contains("global"); + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME); + assertThat(metricData).isNotNull(); + + List pointData = new ArrayList<>(metricData.getData().getPoints()); + + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains("unspecified"); + assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global"); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); + + assertThat(clusterAttributes).contains("unspecified"); + assertThat(zoneAttributes).contains("global"); } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java index 79cbccb0ac..290fcc321f 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java @@ -885,6 +885,7 @@ public void enableRetryInfoFalseValueTest() throws IOException { "generateInitialChangeStreamPartitionsSettings", "readChangeStreamSettings", "pingAndWarmSettings", + "metricsProvider", }; @Test diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java index 1975d0da25..abbf46c468 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java @@ -18,12 +18,9 @@ import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertThrows; -import com.google.api.gax.core.NoCredentialsProvider; import com.google.api.gax.grpc.GrpcStatusCode; -import com.google.api.gax.grpc.GrpcTransportChannel; import com.google.api.gax.rpc.ApiException; import com.google.api.gax.rpc.ErrorDetails; -import com.google.api.gax.rpc.FixedTransportChannelProvider; import 
com.google.api.gax.rpc.InternalException; import com.google.api.gax.rpc.UnavailableException; import com.google.bigtable.v2.BigtableGrpc; @@ -45,6 +42,7 @@ import com.google.bigtable.v2.SampleRowKeysResponse; import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; +import com.google.cloud.bigtable.data.v2.FakeServiceBuilder; import com.google.cloud.bigtable.data.v2.models.BulkMutation; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.Filters; @@ -55,22 +53,31 @@ import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.RowMutation; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; +import com.google.cloud.bigtable.data.v2.models.TableId; import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableList; import com.google.common.collect.Queues; import com.google.protobuf.Any; import com.google.rpc.RetryInfo; +import io.grpc.ForwardingServerCall; import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; -import io.grpc.testing.GrpcServerRule; import java.io.IOException; import java.time.Duration; +import java.util.HashSet; import java.util.Queue; +import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import org.junit.After; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -78,12 +85,13 @@ @RunWith(JUnit4.class) public class RetryInfoTest { - @Rule public GrpcServerRule serverRule = new GrpcServerRule(); - private static final Metadata.Key ERROR_DETAILS_KEY = Metadata.Key.of("grpc-status-details-bin", Metadata.BINARY_BYTE_MARSHALLER); + private final Set methods = new HashSet<>(); + private FakeBigtableService service; + private Server server; private BigtableDataClient client; private BigtableDataSettings.Builder settings; @@ -94,29 +102,111 @@ public class RetryInfoTest { @Before public void setUp() throws IOException { service = new FakeBigtableService(); - serverRule.getServiceRegistry().addService(service); + + ServerInterceptor serverInterceptor = + new ServerInterceptor() { + @Override + public ServerCall.Listener interceptCall( + ServerCall serverCall, + Metadata metadata, + ServerCallHandler serverCallHandler) { + return serverCallHandler.startCall( + new ForwardingServerCall.SimpleForwardingServerCall(serverCall) { + @Override + public void close(Status status, Metadata trailers) { + if (trailers.containsKey(ERROR_DETAILS_KEY)) { + methods.add(serverCall.getMethodDescriptor().getBareMethodName()); + } + super.close(status, trailers); + } + }, + metadata); + } + }; + server = FakeServiceBuilder.create(service).intercept(serverInterceptor).start(); settings = - BigtableDataSettings.newBuilder() + BigtableDataSettings.newBuilderForEmulator(server.getPort()) .setProjectId("fake-project") - .setInstanceId("fake-instance") - .setCredentialsProvider(NoCredentialsProvider.create()); - - settings - .stubSettings() - .setTransportChannelProvider( - FixedTransportChannelProvider.create( - GrpcTransportChannel.create(serverRule.getChannel()))) - // channel priming doesn't work with 
FixedTransportChannelProvider. Disable it for the test - .setRefreshingChannel(false) - .build(); + .setInstanceId("fake-instance"); this.client = BigtableDataClient.create(settings.build()); } + @After + public void tearDown() { + if (client != null) { + client.close(); + } + if (server != null) { + server.shutdown(); + } + } + @Test - public void testReadRow() { - verifyRetryInfoIsUsed(() -> client.readRow("table", "row"), true); + public void testAllMethods() { + // Verify retry info is handled correctly for all the methods in data API. + verifyRetryInfoIsUsed(() -> client.readRow(TableId.of("table"), "row"), true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> client.readRows(Query.create(TableId.of("table"))).iterator().hasNext(), true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> + client.bulkMutateRows( + BulkMutation.create(TableId.of("fake-table")) + .add(RowMutationEntry.create("row-key-1").setCell("cf", "q", "v"))), + true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> + client.mutateRow( + RowMutation.create(TableId.of("fake-table"), "key").setCell("cf", "q", "v")), + true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed(() -> client.sampleRowKeys(TableId.of("table")), true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> + client.checkAndMutateRow( + ConditionalRowMutation.create("table", "key") + .condition(Filters.FILTERS.value().regex("old-value")) + .then(Mutation.create().setCell("cf", "q", "v"))), + true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> + client.readModifyWriteRow( + ReadModifyWriteRow.create("table", "row").append("cf", "q", "v")), + true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> client.readChangeStream(ReadChangeStreamQuery.create("table")).iterator().hasNext(), + true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> client.generateInitialChangeStreamPartitions("table").iterator().hasNext(), true); + + // Verify that the new data API methods are tested or excluded. This is enforced by + // introspecting grpc + // method descriptors. 
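For context on what this test family exercises: a retryable response carries a `google.rpc.RetryInfo` packed into the `grpc-status-details-bin` trailer, the same key the server interceptor above records. A minimal server-side sketch of attaching that trailer is shown below; the status code and one-second delay are arbitrary illustrations, not what the fake service in this test necessarily uses.

```java
import com.google.protobuf.Any;
import com.google.protobuf.Duration;
import com.google.rpc.RetryInfo;
import io.grpc.Metadata;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;

final class RetryInfoTrailerSketch {
  private static final Metadata.Key<byte[]> ERROR_DETAILS_KEY =
      Metadata.Key.of("grpc-status-details-bin", Metadata.BINARY_BYTE_MARSHALLER);

  /** Fails a call with UNAVAILABLE and a RetryInfo hint of one second in the trailers. */
  static <T> void failWithRetryInfo(StreamObserver<T> responseObserver) {
    RetryInfo retryInfo =
        RetryInfo.newBuilder()
            .setRetryDelay(Duration.newBuilder().setSeconds(1).build())
            .build();
    com.google.rpc.Status statusProto =
        com.google.rpc.Status.newBuilder()
            .setCode(Status.Code.UNAVAILABLE.value())
            .addDetails(Any.pack(retryInfo))
            .build();
    Metadata trailers = new Metadata();
    trailers.put(ERROR_DETAILS_KEY, statusProto.toByteArray());
    responseObserver.onError(Status.UNAVAILABLE.asRuntimeException(trailers));
  }
}
```

When the client's retry-info handling is enabled, the attempt scheduler is expected to honor the server-provided delay rather than its own backoff, which is broadly what `verifyRetryInfoIsUsed` asserts for each method above.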
+ Set expected = + BigtableGrpc.getServiceDescriptor().getMethods().stream() + .map(MethodDescriptor::getBareMethodName) + .collect(Collectors.toSet()); + + // Exclude methods that don't support retry info + methods.add("PingAndWarm"); + + assertThat(methods).containsExactlyElementsIn(expected); } @Test @@ -147,11 +237,6 @@ public void testReadRowServerNotReturningRetryInfoClientDisabledHandling() throw } } - @Test - public void testReadRows() { - verifyRetryInfoIsUsed(() -> client.readRows(Query.create("table")).iterator().hasNext(), true); - } - @Test public void testReadRowsNonRetraybleErrorWithRetryInfo() { verifyRetryInfoIsUsed(() -> client.readRows(Query.create("table")).iterator().hasNext(), false); @@ -181,16 +266,6 @@ public void testReadRowsServerNotReturningRetryInfoClientDisabledHandling() thro } } - @Test - public void testMutateRows() { - verifyRetryInfoIsUsed( - () -> - client.bulkMutateRows( - BulkMutation.create("fake-table") - .add(RowMutationEntry.create("row-key-1").setCell("cf", "q", "v"))), - true); - } - @Test public void testMutateRowsNonRetryableErrorWithRetryInfo() { verifyRetryInfoIsUsed( @@ -238,12 +313,6 @@ public void testMutateRowsServerNotReturningRetryInfoClientDisabledHandling() th } } - @Test - public void testMutateRow() { - verifyRetryInfoIsUsed( - () -> client.mutateRow(RowMutation.create("table", "key").setCell("cf", "q", "v")), true); - } - @Test public void testMutateRowNonRetryableErrorWithRetryInfo() { verifyRetryInfoIsUsed( @@ -278,11 +347,6 @@ public void testMutateRowServerNotReturningRetryInfoClientDisabledHandling() thr } } - @Test - public void testSampleRowKeys() { - verifyRetryInfoIsUsed(() -> client.sampleRowKeys("table"), true); - } - @Test public void testSampleRowKeysNonRetryableErrorWithRetryInfo() { verifyRetryInfoIsUsed(() -> client.sampleRowKeys("table"), false); @@ -312,17 +376,6 @@ public void testSampleRowKeysServerNotReturningRetryInfoClientDisabledHandling() } } - @Test - public void testCheckAndMutateRow() { - verifyRetryInfoIsUsed( - () -> - client.checkAndMutateRow( - ConditionalRowMutation.create("table", "key") - .condition(Filters.FILTERS.value().regex("old-value")) - .then(Mutation.create().setCell("cf", "q", "v"))), - true); - } - @Test public void testCheckAndMutateDisableRetryInfo() throws IOException { settings.stubSettings().setEnableRetryInfo(false); @@ -368,15 +421,6 @@ public void testCheckAndMutateServerNotReturningRetryInfoClientDisabledHandling( } } - @Test - public void testReadModifyWrite() { - verifyRetryInfoIsUsed( - () -> - client.readModifyWriteRow( - ReadModifyWriteRow.create("table", "row").append("cf", "q", "v")), - true); - } - @Test public void testReadModifyWriteDisableRetryInfo() throws IOException { settings.stubSettings().setEnableRetryInfo(false); @@ -414,13 +458,6 @@ public void testReadModifyWriteNotReturningRetryInfoClientDisabledHandling() thr } } - @Test - public void testReadChangeStream() { - verifyRetryInfoIsUsed( - () -> client.readChangeStream(ReadChangeStreamQuery.create("table")).iterator().hasNext(), - true); - } - @Test public void testReadChangeStreamNonRetryableErrorWithRetryInfo() { verifyRetryInfoIsUsed( @@ -465,12 +502,6 @@ public void testReadChangeStreamNotReturningRetryInfoClientDisabledHandling() th } } - @Test - public void testGenerateInitialChangeStreamPartition() { - verifyRetryInfoIsUsed( - () -> client.generateInitialChangeStreamPartitions("table").iterator().hasNext(), true); - } - @Test public void testGenerateInitialChangeStreamPartitionNonRetryableError() { 
verifyRetryInfoIsUsed( diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java new file mode 100644 index 0000000000..a0b9c058dc --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java @@ -0,0 +1,310 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; +import static com.google.common.truth.Truth.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.Distribution; +import com.google.api.MonitoredResource; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.stub.MetricServiceStub; +import com.google.common.collect.ImmutableList; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.common.InstrumentationScopeInfo; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableSumData; +import io.opentelemetry.sdk.resources.Resource; +import java.util.Arrays; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import 
org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +public class BigtableCloudMonitoringExporterTest { + private static final String projectId = "fake-project"; + private static final String instanceId = "fake-instance"; + private static final String appProfileId = "default"; + private static final String tableId = "fake-table"; + private static final String zone = "us-east-1"; + private static final String cluster = "cluster-1"; + + private static final String clientName = "fake-client-name"; + private static final String taskId = "fake-task-id"; + + @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule(); + + @Mock private MetricServiceStub mockMetricServiceStub; + private MetricServiceClient fakeMetricServiceClient; + private BigtableCloudMonitoringExporter exporter; + + private Attributes attributes; + private Resource resource; + private InstrumentationScopeInfo scope; + + @Before + public void setUp() { + fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub); + + exporter = + new BigtableCloudMonitoringExporter( + projectId, fakeMetricServiceClient, /* applicationResource= */ null, taskId); + + attributes = + Attributes.builder() + .put(BIGTABLE_PROJECT_ID_KEY, projectId) + .put(INSTANCE_ID_KEY, instanceId) + .put(TABLE_ID_KEY, tableId) + .put(CLUSTER_ID_KEY, cluster) + .put(ZONE_ID_KEY, zone) + .put(APP_PROFILE_KEY, appProfileId) + .build(); + + resource = Resource.create(Attributes.empty()); + + scope = InstrumentationScopeInfo.create(BuiltinMetricsConstants.METER_NAME); + } + + @After + public void tearDown() {} + + @Test + public void testExportingSumData() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long fakeValue = 11L; + + long startEpoch = 10; + long endEpoch = 15; + LongPointData longPointData = + ImmutableLongPointData.create(startEpoch, endEpoch, attributes, fakeValue); + + MetricData longData = + ImmutableMetricData.createLongSum( + resource, + scope, + "bigtable.googleapis.com/internal/client/retry_count", + "description", + "1", + ImmutableSumData.create( + true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData))); + + exporter.export(Arrays.asList(longData)); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getTimeSeriesList()).hasSize(1); + + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + BIGTABLE_PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + TABLE_ID_KEY.getKey(), tableId, + CLUSTER_ID_KEY.getKey(), cluster, + ZONE_ID_KEY.getKey(), zone); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId); + assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(fakeValue); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + 
assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + + @Test + public void testExportingHistogramData() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10; + long endEpoch = 15; + HistogramPointData histogramPointData = + ImmutableHistogramPointData.create( + startEpoch, + endEpoch, + attributes, + 3d, + true, + 1d, // min + true, + 2d, // max + Arrays.asList(1.0), + Arrays.asList(1L, 2L)); + + MetricData histogramData = + ImmutableMetricData.createDoubleHistogram( + resource, + scope, + "bigtable.googleapis.com/internal/client/operation_latencies", + "description", + "ms", + ImmutableHistogramData.create( + AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData))); + + exporter.export(Arrays.asList(histogramData)); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getTimeSeriesList()).hasSize(1); + + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + BIGTABLE_PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + TABLE_ID_KEY.getKey(), tableId, + CLUSTER_ID_KEY.getKey(), cluster, + ZONE_ID_KEY.getKey(), zone); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId); + Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue(); + assertThat(distribution.getCount()).isEqualTo(3); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + + @Test + public void testTimeSeriesForMetricWithGceOrGkeResource() { + String gceProjectId = "fake-gce-project"; + BigtableCloudMonitoringExporter exporter = + new BigtableCloudMonitoringExporter( + projectId, + fakeMetricServiceClient, + MonitoredResource.newBuilder() + .setType("gce-instance") + .putLabels("some-gce-key", "some-gce-value") + .putLabels("project_id", gceProjectId) + .build(), + taskId); + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10; + long endEpoch = 15; + HistogramPointData histogramPointData = + ImmutableHistogramPointData.create( + startEpoch, + endEpoch, + Attributes.of( + BIGTABLE_PROJECT_ID_KEY, + projectId, + INSTANCE_ID_KEY, + instanceId, + APP_PROFILE_KEY, + appProfileId, + CLIENT_NAME_KEY, + clientName), + 3d, + true, + 1d, // min + true, + 2d, // max + Arrays.asList(1.0), + Arrays.asList(1L, 2L)); + + MetricData histogramData = + ImmutableMetricData.createDoubleHistogram( + resource, + scope, + "bigtable.googleapis.com/internal/client/per_connection_error_count", 
+ "description", + "ms", + ImmutableHistogramData.create( + AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData))); + + exporter.export(Arrays.asList(histogramData)); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getName()).isEqualTo("projects/" + gceProjectId); + assertThat(request.getTimeSeriesList()).hasSize(1); + + com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly("some-gce-key", "some-gce-value", "project_id", gceProjectId); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsAtLeast( + BIGTABLE_PROJECT_ID_KEY.getKey(), + projectId, + INSTANCE_ID_KEY.getKey(), + instanceId, + APP_PROFILE_KEY.getKey(), + appProfileId, + CLIENT_NAME_KEY.getKey(), + clientName, + CLIENT_UID_KEY.getKey(), + taskId); + } + + private static class FakeMetricServiceClient extends MetricServiceClient { + + protected FakeMetricServiceClient(MetricServiceStub stub) { + super(stub); + } + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java index 5d16b623fd..a12dd3cfbd 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java @@ -45,7 +45,6 @@ import com.google.cloud.bigtable.data.v2.models.SampleRowKeysRequest; import com.google.cloud.bigtable.data.v2.models.TableId; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; -import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; import com.google.common.collect.ImmutableMap; import io.grpc.ForwardingServerCall.SimpleForwardingServerCall; import io.grpc.Metadata; @@ -126,16 +125,21 @@ public void sendHeaders(Metadata headers) { .setInstanceId(INSTANCE_ID) .setAppProfileId(APP_PROFILE_ID) .build(); - EnhancedBigtableStubSettings stubSettings = - settings - .getStubSettings() + + ClientContext clientContext = + EnhancedBigtableStub.createClientContext(settings.getStubSettings()); + clientContext = + clientContext .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( - settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder())) + settings.getStubSettings(), + Tags.getTagger(), + localStats.getStatsRecorder(), + null)) .build(); - attempts = stubSettings.readRowsSettings().getRetrySettings().getMaxAttempts(); - stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings)); + attempts = settings.getStubSettings().readRowsSettings().getRetrySettings().getMaxAttempts(); + stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext); // Create another server without injecting the server-timing header and another stub that // connects to it. 
@@ -147,18 +151,21 @@ public void sendHeaders(Metadata headers) { .setInstanceId(INSTANCE_ID) .setAppProfileId(APP_PROFILE_ID) .build(); - EnhancedBigtableStubSettings noHeaderStubSettings = - noHeaderSettings - .getStubSettings() + + ClientContext noHeaderClientContext = + EnhancedBigtableStub.createClientContext(noHeaderSettings.getStubSettings()); + noHeaderClientContext = + noHeaderClientContext .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( noHeaderSettings.getStubSettings(), Tags.getTagger(), - localStats.getStatsRecorder())) + localStats.getStatsRecorder(), + null)) .build(); noHeaderStub = - new EnhancedBigtableStub(noHeaderStubSettings, ClientContext.create(noHeaderStubSettings)); + new EnhancedBigtableStub(noHeaderSettings.getStubSettings(), noHeaderClientContext); } @After diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java new file mode 100644 index 0000000000..09b7e1f663 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java @@ -0,0 +1,112 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.core.InternalApi; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.Assert; + +@InternalApi +public class BuiltinMetricsTestUtils { + + private BuiltinMetricsTestUtils() {} + + public static MetricData getMetricData(Collection allMetricData, String metricName) { + List metricDataList = + allMetricData.stream() + .filter(md -> md.getName().equals(BuiltinMetricsConstants.METER_NAME + metricName)) + .collect(Collectors.toList()); + if (metricDataList.size() == 0) { + allMetricData.stream().forEach(md -> System.out.println(md.getName())); + } + assertThat(metricDataList.size()).isEqualTo(1); + + return metricDataList.get(0); + } + + public static long getAggregatedValue(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + HistogramPointData hd = + metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return (long) hd.getSum() / hd.getCount(); + case LONG_SUM: + LongPointData ld = + metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return ld.getValue(); + default: + return 0; + } + } + + public static Timestamp getStartTimeSeconds(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + HistogramPointData hd = + metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return Timestamps.fromNanos(hd.getStartEpochNanos()); + case LONG_SUM: + LongPointData ld = + metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return Timestamps.fromNanos(ld.getStartEpochNanos()); + default: + return Timestamp.getDefaultInstance(); + } + } + + public static void verifyAttributes(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + List hd = + metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()); + assertThat(hd).isNotEmpty(); + break; + case LONG_SUM: + List ld = + metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()); + assertThat(ld).isNotEmpty(); + break; + default: + Assert.fail("Unexpected type"); + } + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java index 06b923cad3..2dd4bcabb3 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java @@ -15,14 
+15,24 @@ */ package com.google.cloud.bigtable.data.v2.stub.metrics; -import static com.google.api.gax.tracing.ApiTracerFactory.OperationType; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes; import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; import com.google.api.client.util.Lists; import com.google.api.core.ApiFunction; @@ -36,7 +46,6 @@ import com.google.api.gax.rpc.NotFoundException; import com.google.api.gax.rpc.ResponseObserver; import com.google.api.gax.rpc.StreamController; -import com.google.api.gax.tracing.SpanName; import com.google.bigtable.v2.BigtableGrpc; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; @@ -45,6 +54,7 @@ import com.google.bigtable.v2.ReadRowsRequest; import com.google.bigtable.v2.ReadRowsResponse; import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.Version; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.FakeServiceBuilder; import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId; @@ -52,9 +62,9 @@ import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; +import com.google.cloud.bigtable.data.v2.models.TableId; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; -import com.google.cloud.bigtable.stats.StatsRecorderWrapper; 
import com.google.common.base.Stopwatch; import com.google.common.collect.Range; import com.google.protobuf.ByteString; @@ -77,11 +87,21 @@ import io.grpc.StatusRuntimeException; import io.grpc.stub.ServerCallStreamObserver; import io.grpc.stub.StreamObserver; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; import java.nio.charset.Charset; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -92,12 +112,8 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; -import org.mockito.stubbing.Answer; import org.threeten.bp.Duration; @RunWith(JUnit4.class) @@ -105,8 +121,8 @@ public class BuiltinMetricsTracerTest { private static final String PROJECT_ID = "fake-project"; private static final String INSTANCE_ID = "fake-instance"; private static final String APP_PROFILE_ID = "default"; - private static final String TABLE_ID = "fake-table"; - private static final String AUTHORIZED_VIEW_ID = "fake-authorized-view"; + private static final String TABLE = "fake-table"; + private static final String BAD_TABLE_ID = "non-exist-table"; private static final String ZONE = "us-west-1"; private static final String CLUSTER = "cluster-0"; @@ -114,6 +130,7 @@ public class BuiltinMetricsTracerTest { private static final long SERVER_LATENCY = 100; private static final long APPLICATION_LATENCY = 200; private static final long SLEEP_VARIABILITY = 15; + private static final String CLIENT_NAME = "java-bigtable/" + Version.VERSION; private static final long CHANNEL_BLOCKING_LATENCY = 75; @@ -124,18 +141,35 @@ public class BuiltinMetricsTracerTest { private EnhancedBigtableStub stub; - @Mock private BuiltinMetricsTracerFactory mockFactory; - @Mock private StatsRecorderWrapper statsRecorderWrapper; + private int batchElementCount = 2; - @Captor private ArgumentCaptor status; - @Captor private ArgumentCaptor tableId; - @Captor private ArgumentCaptor zone; - @Captor private ArgumentCaptor cluster; + private Attributes baseAttributes; - private int batchElementCount = 2; + private InMemoryMetricReader metricReader; @Before public void setUp() throws Exception { + metricReader = InMemoryMetricReader.create(); + + baseAttributes = + Attributes.builder() + .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, PROJECT_ID) + .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, INSTANCE_ID) + .put(BuiltinMetricsConstants.APP_PROFILE_KEY, APP_PROFILE_ID) + .build(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + + for (Map.Entry entry : + BuiltinMetricsConstants.getAllViews().entrySet()) { + meterProvider.registerView(entry.getKey(), entry.getValue()); + } + + OpenTelemetrySdk otel = + 
OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + BuiltinMetricsTracerFactory facotry = BuiltinMetricsTracerFactory.create(otel, baseAttributes); + // Add an interceptor to add server-timing in headers ServerInterceptor trailersInterceptor = new ServerInterceptor() { @@ -216,7 +250,8 @@ public void sendMessage(ReqT message) { .setMaxOutstandingRequestBytes(1001L) .build()) .build()); - stubSettingsBuilder.setTracerFactory(mockFactory); + + stubSettingsBuilder.setTracerFactory(facotry); InstantiatingGrpcChannelProvider.Builder channelProvider = ((InstantiatingGrpcChannelProvider) stubSettingsBuilder.getTransportChannelProvider()) @@ -247,117 +282,117 @@ public void tearDown() { @Test public void testReadRowsOperationLatencies() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); - Stopwatch stopwatch = Stopwatch.createStarted(); - Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator()); + Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE)).iterator()); long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - // verify record operation is only called once - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(STREAMING_KEY, true) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + + Collection allMetricData = metricReader.collectAllMetrics(); + + MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); - assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed)); - assertThat(status.getAllValues()).containsExactly("OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID); - assertThat(zone.getAllValues()).containsExactly(ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER); + long value = getAggregatedValue(metricData, expectedAttributes); + assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed)); } @Test public void testReadRowsOperationLatenciesOnAuthorizedView() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); - + String authorizedViewId = "test-authorized-view-id"; Stopwatch stopwatch = Stopwatch.createStarted(); Lists.newArrayList( - stub.readRowsCallable() - .call(Query.create(AuthorizedViewId.of(TABLE_ID, AUTHORIZED_VIEW_ID))) - .iterator()); + stub.readRowsCallable().call(Query.create(AuthorizedViewId.of(TABLE, authorizedViewId)))); long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - // verify record operation is only called once - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Attributes expectedAttributes = + 
baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(STREAMING_KEY, true) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed)); - assertThat(status.getAllValues()).containsExactly("OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID); - assertThat(zone.getAllValues()).containsExactly(ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER); + Collection allMetricData = metricReader.collectAllMetrics(); + + MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + long value = getAggregatedValue(metricData, expectedAttributes); + assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed)); } @Test public void testGfeMetrics() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - ArgumentCaptor gfeLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor gfeMissingHeaders = ArgumentCaptor.forClass(Long.class); - - Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID))); - - // Verify record attempt are called multiple times - verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get())) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - - // The request was retried and gfe latency is only recorded in the retry attempt - verify(statsRecorderWrapper).putGfeLatencies(gfeLatency.capture()); - assertThat(gfeLatency.getValue()).isEqualTo(FAKE_SERVER_TIMING); - - // The first time the request was retried, it'll increment missing header counter - verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get())) - .putGfeMissingHeaders(gfeMissingHeaders.capture()); - assertThat(gfeMissingHeaders.getAllValues()).containsExactly(1L, 0L); - - assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID); - assertThat(zone.getAllValues()).containsExactly("global", ZONE); - assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER); + Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE))); + + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.ReadRows") + .build(); + + Collection allMetricData = metricReader.collectAllMetrics(); + + MetricData serverLatenciesMetricData = getMetricData(allMetricData, SERVER_LATENCIES_NAME); + + long serverLatencies = getAggregatedValue(serverLatenciesMetricData, expectedAttributes); + assertThat(serverLatencies).isEqualTo(FAKE_SERVER_TIMING); + + MetricData connectivityErrorCountMetricData = + getMetricData(allMetricData, CONNECTIVITY_ERROR_COUNT_NAME); + Attributes expected1 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "UNAVAILABLE") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + Attributes expected2 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, 
ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + + verifyAttributes(connectivityErrorCountMetricData, expected1); + verifyAttributes(connectivityErrorCountMetricData, expected2); + + assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected1)).isEqualTo(1); + assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected2)).isEqualTo(0); } @Test public void testReadRowsApplicationLatencyWithAutoFlowControl() throws Exception { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - - ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); - final SettableApiFuture future = SettableApiFuture.create(); final AtomicInteger counter = new AtomicInteger(0); // For auto flow control, application latency is the time application spent in onResponse. stub.readRowsCallable() .call( - Query.create(TABLE_ID), + Query.create(TABLE), new ResponseObserver() { @Override public void onStart(StreamController streamController) {} @@ -383,37 +418,38 @@ public void onComplete() { }); future.get(); - verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture()); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - assertThat(counter.get()).isEqualTo(fakeService.getResponseCounter().get()); - // Thread.sleep might not sleep for the requested amount depending on the interrupt period - // defined by the OS. - // On linux this is ~1ms but on windows may be as high as 15-20ms. 
- assertThat(applicationLatency.getValue()) - .isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get()); - assertThat(applicationLatency.getValue()) - .isAtMost(operationLatency.getValue() - SERVER_LATENCY); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData applicationLatency = + getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME); + + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.ReadRows") + .build(); + long value = getAggregatedValue(applicationLatency, expectedAttributes); + + assertThat(value).isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get()); + + MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + long operationLatencyValue = + getAggregatedValue( + operationLatency, + expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build()); + assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY); } @Test public void testReadRowsApplicationLatencyWithManualFlowControl() throws Exception { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - - ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); int counter = 0; - Iterator rows = stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator(); + Iterator rows = stub.readRowsCallable().call(Query.create(TABLE)).iterator(); while (rows.hasNext()) { counter++; @@ -421,148 +457,189 @@ public void testReadRowsApplicationLatencyWithManualFlowControl() throws Excepti rows.next(); } - verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture()); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData applicationLatency = + getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME); + + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.ReadRows") + .build(); - // For manual flow control, the last application latency shouldn't count, because at that point - // the server already sent back all the responses. + long value = getAggregatedValue(applicationLatency, expectedAttributes); + // For manual flow control, the last application latency shouldn't count, because at that + // point the server already sent back all the responses. 
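+    // For example, if the iterator yields 4 rows (row count illustrative), the lower bound asserted
+    // below works out to APPLICATION_LATENCY * (4 - 1) - SERVER_LATENCY = 200 * 3 - 100 = 500 ms.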
assertThat(counter).isEqualTo(fakeService.getResponseCounter().get()); - assertThat(applicationLatency.getValue()) - .isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY); - assertThat(applicationLatency.getValue()) - .isAtMost(operationLatency.getValue() - SERVER_LATENCY); + assertThat(value).isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY); + + MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + long operationLatencyValue = + getAggregatedValue( + operationLatency, + expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build()); + assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY); } @Test - public void testRetryCount() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "MutateRow"), - statsRecorderWrapper)); - - ArgumentCaptor retryCount = ArgumentCaptor.forClass(Integer.class); - + public void testRetryCount() throws InterruptedException { stub.mutateRowCallable() - .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value")); - - // In TracedUnaryCallable, we create a future and add a TraceFinisher to the callback. Main - // thread is blocked on waiting for the future to be completed. When onComplete is called on - // the grpc thread, the future is completed, however we might not have enough time for - // TraceFinisher to run. Add a 1 second time out to wait for the callback. This shouldn't have - // any impact on production code. - verify(statsRecorderWrapper, timeout(1000)).putRetryCount(retryCount.capture()); + .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value")); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, RETRY_COUNT_NAME); + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(STATUS_KEY, "OK") + .build(); - assertThat(retryCount.getValue()).isEqualTo(fakeService.getAttemptCounter().get() - 1); + long value = getAggregatedValue(metricData, expectedAttributes); + assertThat(value).isEqualTo(fakeService.getAttemptCounter().get() - 1); } @Test public void testMutateRowAttemptsTagValues() { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper)); - stub.mutateRowCallable() - .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value")); - - // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set - // attempt succeeded and set the response which will call complete() in AbstractFuture which - // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be - // called after the mutateRow call is returned. So there's a race between when the call returns - // and when the record() is called in onOperationCompletion(). 
- verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get())) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - assertThat(zone.getAllValues()).containsExactly("global", "global", ZONE); - assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", CLUSTER); - assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "UNAVAILABLE", "OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID, TABLE_ID); + .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value")); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected1 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "UNAVAILABLE") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); + + Attributes expected2 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); + + verifyAttributes(metricData, expected1); + verifyAttributes(metricData, expected2); } @Test public void testMutateRowsPartialError() throws InterruptedException { + Batcher batcher = stub.newMutateRowsBatcher(TableId.of(TABLE), null); int numMutations = 6; - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper)); - - Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null); for (int i = 0; i < numMutations; i++) { String key = i % 2 == 0 ? 
"key" : "fail-key"; batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v")); } - assertThrows(BatchingException.class, () -> batcher.close()); - - int expectedNumRequests = numMutations / batchElementCount; - verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Assert.assertThrows(BatchingException.class, batcher::close); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.MutateRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); - assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER); - assertThat(status.getAllValues()).containsExactly("OK", "OK", "OK"); + verifyAttributes(metricData, expected); } @Test public void testMutateRowsRpcError() { + Batcher batcher = + stub.newMutateRowsBatcher(TableId.of(BAD_TABLE_ID), null); int numMutations = 6; - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper)); - - Batcher batcher = stub.newMutateRowsBatcher(BAD_TABLE_ID, null); for (int i = 0; i < numMutations; i++) { - batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v")); + String key = i % 2 == 0 ? "key" : "fail-key"; + batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v")); } - assertThrows(BatchingException.class, () -> batcher.close()); - - int expectedNumRequests = numMutations / batchElementCount; - verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Assert.assertThrows(BatchingException.class, batcher::close); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "NOT_FOUND") + .put(TABLE_ID_KEY, BAD_TABLE_ID) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.MutateRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); - assertThat(zone.getAllValues()).containsExactly("global", "global", "global"); - assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", "unspecified"); - assertThat(status.getAllValues()).containsExactly("NOT_FOUND", "NOT_FOUND", "NOT_FOUND"); + verifyAttributes(metricData, expected); } @Test public void testReadRowsAttemptsTagValues() { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - Lists.newArrayList(stub.readRowsCallable().call(Query.create("fake-table")).iterator()); - // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set - // attempt succeeded and set the response which will call complete() in AbstractFuture which - // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be - // called after the mutateRow call is returned. 
So there's a race between when the call returns - // and when the record() is called in onOperationCompletion(). - verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get())) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - assertThat(zone.getAllValues()).containsExactly("global", ZONE); - assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER); - assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK"); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected1 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "UNAVAILABLE") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, true) + .build(); + + Attributes expected2 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, true) + .build(); + + verifyAttributes(metricData, expected1); + verifyAttributes(metricData, expected2); } @Test public void testBatchBlockingLatencies() throws InterruptedException { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper)); - try (Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null)) { + try (Batcher batcher = stub.newMutateRowsBatcher(TABLE, null)) { for (int i = 0; i < 6; i++) { batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v")); } @@ -571,86 +648,100 @@ public void testBatchBlockingLatencies() throws InterruptedException { batcher.close(); int expectedNumRequests = 6 / batchElementCount; - ArgumentCaptor throttledTime = ArgumentCaptor.forClass(Long.class); - verify(statsRecorderWrapper, timeout(1000).times(expectedNumRequests)) - .putClientBlockingLatencies(throttledTime.capture()); - // After the first request is sent, batcher will block on add because of the server latency. - // Blocking latency should be around server latency. - assertThat(throttledTime.getAllValues().get(1)).isAtLeast(SERVER_LATENCY - 10); - assertThat(throttledTime.getAllValues().get(2)).isAtLeast(SERVER_LATENCY - 10); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData applicationLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME); - verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.MutateRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER); + long value = getAggregatedValue(applicationLatency, expectedAttributes); + // After the first request is sent, batcher will block on add because of the server latency. + // Blocking latency should be around server latency. So each data point would be at least + // (SERVER_LATENCY - 10). 
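+      // For example: 6 mutations with batchElementCount = 2 yield 3 requests; only the first request
+      // is unthrottled, so the mean is at least (SERVER_LATENCY - 10) * 2 / 3 = 90 * 2 / 3 = 60 ms.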
+ long expected = (SERVER_LATENCY - 10) * (expectedNumRequests - 1) / expectedNumRequests; + assertThat(value).isAtLeast(expected); } } @Test - public void testQueuedOnChannelServerStreamLatencies() throws InterruptedException { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - - stub.readRowsCallable().all().call(Query.create(TABLE_ID)); - - ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class); - - verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get())) - .putClientBlockingLatencies(blockedTime.capture()); + public void testQueuedOnChannelServerStreamLatencies() { + stub.readRowsCallable().all().call(Query.create(TABLE)); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME); + + Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(ZONE_ID_KEY, ZONE) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY); + long value = getAggregatedValue(clientLatency, attributes); + assertThat(value).isAtLeast(CHANNEL_BLOCKING_LATENCY); } @Test - public void testQueuedOnChannelUnaryLatencies() throws InterruptedException { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper)); - stub.mutateRowCallable().call(RowMutation.create(TABLE_ID, "a-key").setCell("f", "q", "v")); + public void testQueuedOnChannelUnaryLatencies() { - ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class); + stub.mutateRowCallable().call(RowMutation.create(TABLE, "a-key").setCell("f", "q", "v")); - verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get())) - .putClientBlockingLatencies(blockedTime.capture()); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME); - assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY); - assertThat(blockedTime.getAllValues().get(2)).isAtLeast(CHANNEL_BLOCKING_LATENCY); + Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(ZONE_ID_KEY, ZONE) + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + + long expected = CHANNEL_BLOCKING_LATENCY * 2 / 3; + long actual = getAggregatedValue(clientLatency, attributes); + assertThat(actual).isAtLeast(expected); } @Test public void testPermanentFailure() { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - try { Lists.newArrayList(stub.readRowsCallable().call(Query.create(BAD_TABLE_ID)).iterator()); Assert.fail("Request should throw not found error"); } catch (NotFoundException e) { } - ArgumentCaptor attemptLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData attemptLatency = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + 
+ Attributes expected = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "NOT_FOUND") + .put(TABLE_ID_KEY, BAD_TABLE_ID) + .put(CLUSTER_ID_KEY, "unspecified") + .put(ZONE_ID_KEY, "global") + .put(STREAMING_KEY, true) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - verify(statsRecorderWrapper, timeout(50)).putAttemptLatencies(attemptLatency.capture()); - verify(statsRecorderWrapper, timeout(50)).putOperationLatencies(operationLatency.capture()); - verify(statsRecorderWrapper, timeout(50)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + verifyAttributes(attemptLatency, expected); - assertThat(status.getValue()).isEqualTo("NOT_FOUND"); - assertThat(tableId.getValue()).isEqualTo(BAD_TABLE_ID); - assertThat(cluster.getValue()).isEqualTo("unspecified"); - assertThat(zone.getValue()).isEqualTo("global"); + MetricData opLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + verifyAttributes(opLatency, expected); } private static class FakeService extends BigtableGrpc.BigtableImplBase { diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java index a6670182b8..4ab19a5337 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java @@ -23,17 +23,29 @@ import com.google.api.gax.grpc.ChannelPoolSettings; import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; import com.google.bigtable.v2.*; +import com.google.cloud.bigtable.Version; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.FakeServiceBuilder; import com.google.cloud.bigtable.data.v2.models.*; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; -import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection; import io.grpc.Server; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.util.ArrayList; +import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.concurrent.ScheduledExecutorService; import org.junit.After; import org.junit.Before; @@ -51,25 +63,50 @@ public class ErrorCountPerConnectionTest { private final FakeService fakeService = new FakeService(); private EnhancedBigtableStubSettings.Builder builder; private ArgumentCaptor runnableCaptor; - private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection; + + private InMemoryMetricReader metricReader; + + private Attributes attributes; @Before public void setup() throws Exception { server = FakeServiceBuilder.create(fakeService).start(); 
ScheduledExecutorService executors = Mockito.mock(ScheduledExecutorService.class); + + attributes = + Attributes.builder() + .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, "fake-project") + .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, "fake-instance") + .put(BuiltinMetricsConstants.APP_PROFILE_KEY, "") + .put(BuiltinMetricsConstants.CLIENT_NAME_KEY, "bigtable-java/" + Version.VERSION) + .build(); + + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + + for (Map.Entry entry : + BuiltinMetricsConstants.getAllViews().entrySet()) { + meterProvider.registerView(entry.getKey(), entry.getValue()); + } + + OpenTelemetrySdk otel = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + builder = BigtableDataSettings.newBuilderForEmulator(server.getPort()) .stubSettings() .setBackgroundExecutorProvider(FixedExecutorProvider.create(executors)) .setProjectId("fake-project") - .setInstanceId("fake-instance"); + .setInstanceId("fake-instance") + .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(otel)); + runnableCaptor = ArgumentCaptor.forClass(Runnable.class); Mockito.when( executors.scheduleAtFixedRate(runnableCaptor.capture(), anyLong(), anyLong(), any())) .thenReturn(null); - - statsRecorderWrapperForConnection = Mockito.mock(StatsRecorderWrapperForConnection.class); } @After @@ -98,14 +135,21 @@ public void readWithOneChannel() throws Exception { // noop } } - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); + runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(1); - assertThat(allErrorCounts.get(0)).isEqualTo(errorCount); + + Collection allMetrics = metricReader.collectAllMetrics(); + MetricData metricData = + BuiltinMetricsTestUtils.getMetricData( + allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME); + + // Make sure the correct bucket is updated with the correct number of data points + ArrayList histogramPointData = + new ArrayList<>(metricData.getHistogramData().getPoints()); + assertThat(histogramPointData.size()).isEqualTo(1); + HistogramPointData point = histogramPointData.get(0); + int index = findDataPointIndex(point.getBoundaries(), errorCount); + assertThat(point.getCounts().get(index)).isEqualTo(1); } @Test @@ -131,28 +175,35 @@ public void readWithTwoChannels() throws Exception { // noop } } - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(2); - // Requests get assigned to channels using a Round Robin algorithm, so half to each. - assertThat(allErrorCounts).containsExactly(totalErrorCount / 2, totalErrorCount / 2); + long errorCountPerChannel = totalErrorCount / 2; + + Collection allMetrics = metricReader.collectAllMetrics(); + MetricData metricData = + BuiltinMetricsTestUtils.getMetricData( + allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME); + + // The 2 channels should get equal amount of errors, so the totalErrorCount / 2 bucket is + // updated twice. 
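+    // Illustration of the bucket lookup done by the findDataPointIndex helper defined at the bottom
+    // of this test: with hypothetical boundaries [1, 2, 4, 8] and errorCountPerChannel = 5, the first
+    // boundary >= 5 sits at index 3, and counts.get(3) is expected to be 2 because both channels
+    // record the same number of errors.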
+ ArrayList histogramPointData = + new ArrayList<>(metricData.getHistogramData().getPoints()); + assertThat(histogramPointData.size()).isEqualTo(1); + HistogramPointData point = histogramPointData.get(0); + int index = findDataPointIndex(point.getBoundaries(), errorCountPerChannel); + assertThat(point.getCounts().get(index)).isEqualTo(2); } @Test public void readOverTwoPeriods() throws Exception { EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build()); - long errorCount = 0; + long errorCount1 = 0; for (int i = 0; i < 20; i++) { Query query; if (i % 3 == 0) { query = Query.create(ERROR_TABLE_NAME); - errorCount += 1; + errorCount1 += 1; } else { query = Query.create(SUCCESS_TABLE_NAME); } @@ -162,16 +213,9 @@ public void readOverTwoPeriods() throws Exception { // noop } } - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); - runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(1); - assertThat(allErrorCounts.get(0)).isEqualTo(errorCount); - errorCount = 0; + runInterceptorTasksAndAssertCount(); + long errorCount2 = 0; for (int i = 0; i < 20; i++) { Query query; @@ -179,7 +223,7 @@ public void readOverTwoPeriods() throws Exception { query = Query.create(SUCCESS_TABLE_NAME); } else { query = Query.create(ERROR_TABLE_NAME); - errorCount += 1; + errorCount2 += 1; } try { stub.readRowsCallable().call(query).iterator().hasNext(); @@ -187,27 +231,22 @@ public void readOverTwoPeriods() throws Exception { // noop } } - errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); + runInterceptorTasksAndAssertCount(); - allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(1); - assertThat(allErrorCounts.get(0)).isEqualTo(errorCount); - } - @Test - public void ignoreInactiveConnection() throws Exception { - EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build()); + Collection allMetrics = metricReader.collectAllMetrics(); + MetricData metricData = + BuiltinMetricsTestUtils.getMetricData( + allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME); - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); - runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts).isEmpty(); + ArrayList histogramPointData = + new ArrayList<>(metricData.getHistogramData().getPoints()); + assertThat(histogramPointData.size()).isEqualTo(1); + HistogramPointData point = histogramPointData.get(0); + int index1 = findDataPointIndex(point.getBoundaries(), errorCount1); + int index2 = findDataPointIndex(point.getBoundaries(), errorCount2); + assertThat(point.getCounts().get(index1)).isEqualTo(1); + assertThat(point.getCounts().get(index2)).isEqualTo(1); } @Test @@ -221,22 +260,19 @@ public void noFailedRequests() throws Exception { // noop } } - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); 
runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(1); - assertThat(allErrorCounts.get(0)).isEqualTo(0); + Collection allMetrics = metricReader.collectAllMetrics(); + MetricData metricData = + BuiltinMetricsTestUtils.getMetricData( + allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME); + long value = BuiltinMetricsTestUtils.getAggregatedValue(metricData, attributes); + assertThat(value).isEqualTo(0); } private void runInterceptorTasksAndAssertCount() { int actualNumOfTasks = 0; for (Runnable runnable : runnableCaptor.getAllValues()) { if (runnable instanceof ErrorCountPerConnectionMetricTracker) { - ((ErrorCountPerConnectionMetricTracker) runnable) - .setStatsRecorderWrapperForConnection(statsRecorderWrapperForConnection); runnable.run(); actualNumOfTasks++; } @@ -244,6 +280,16 @@ private void runInterceptorTasksAndAssertCount() { assertThat(actualNumOfTasks).isEqualTo(1); } + private int findDataPointIndex(List boundaries, long dataPoint) { + int index = 0; + for (; index < boundaries.size(); index++) { + if (boundaries.get(index) >= dataPoint) { + break; + } + } + return index; + } + static class FakeService extends BigtableGrpc.BigtableImplBase { @Override public void readRows( diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java index 15bd9171f0..d72eac4056 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java @@ -39,7 +39,6 @@ import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; -import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor; import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableMap; @@ -120,15 +119,20 @@ public void setUp() throws Exception { .setInstanceId(INSTANCE_ID) .setAppProfileId(APP_PROFILE_ID) .build(); - EnhancedBigtableStubSettings stubSettings = - settings - .getStubSettings() + + ClientContext clientContext = + EnhancedBigtableStub.createClientContext(settings.getStubSettings()); + clientContext = + clientContext .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( - settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder())) + settings.getStubSettings(), + Tags.getTagger(), + localStats.getStatsRecorder(), + null)) .build(); - stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings)); + stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext); } @After diff --git a/grpc-google-cloud-bigtable-admin-v2/pom.xml b/grpc-google-cloud-bigtable-admin-v2/pom.xml index e7d1f4076b..1ab145fce8 100644 --- a/grpc-google-cloud-bigtable-admin-v2/pom.xml +++ b/grpc-google-cloud-bigtable-admin-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigtable-admin-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT grpc-google-cloud-bigtable-admin-v2 GRPC library for grpc-google-cloud-bigtable-admin-v2 com.google.cloud google-cloud-bigtable-parent - 2.37.1-SNAPSHOT + 
2.38.1-SNAPSHOT @@ -18,14 +18,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import com.google.cloud google-cloud-bigtable-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import diff --git a/grpc-google-cloud-bigtable-v2/pom.xml b/grpc-google-cloud-bigtable-v2/pom.xml index 983bfea873..f0aadd5760 100644 --- a/grpc-google-cloud-bigtable-v2/pom.xml +++ b/grpc-google-cloud-bigtable-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigtable-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT grpc-google-cloud-bigtable-v2 GRPC library for grpc-google-cloud-bigtable-v2 com.google.cloud google-cloud-bigtable-parent - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT @@ -18,14 +18,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import com.google.cloud google-cloud-bigtable-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import diff --git a/pom.xml b/pom.xml index f193e7e852..6406ff41e1 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ google-cloud-bigtable-parent pom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT Google Cloud Bigtable Parent https://github.com/googleapis/java-bigtable @@ -14,7 +14,7 @@ com.google.cloud sdk-platform-java-config - 3.28.1 + 3.29.0 @@ -153,27 +153,27 @@ com.google.api.grpc proto-google-cloud-bigtable-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT com.google.api.grpc proto-google-cloud-bigtable-admin-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-bigtable-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-bigtable-admin-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT com.google.cloud google-cloud-bigtable - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT @@ -347,22 +347,6 @@ - - - - with-shaded - - - !skip-shaded - - - - google-cloud-bigtable-stats - - diff --git a/proto-google-cloud-bigtable-admin-v2/pom.xml b/proto-google-cloud-bigtable-admin-v2/pom.xml index ee13e371c9..7f05a7c112 100644 --- a/proto-google-cloud-bigtable-admin-v2/pom.xml +++ b/proto-google-cloud-bigtable-admin-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigtable-admin-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT proto-google-cloud-bigtable-admin-v2 PROTO library for proto-google-cloud-bigtable-admin-v2 com.google.cloud google-cloud-bigtable-parent - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT @@ -18,14 +18,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import com.google.cloud google-cloud-bigtable-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import diff --git a/proto-google-cloud-bigtable-v2/pom.xml b/proto-google-cloud-bigtable-v2/pom.xml index 2b0787b6d8..8f2db9b76d 100644 --- a/proto-google-cloud-bigtable-v2/pom.xml +++ b/proto-google-cloud-bigtable-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigtable-v2 - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT proto-google-cloud-bigtable-v2 PROTO library for proto-google-cloud-bigtable-v2 com.google.cloud google-cloud-bigtable-parent - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT @@ -18,14 +18,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import com.google.cloud google-cloud-bigtable-bom - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT pom import diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index 48f9dd3756..473ddf3d26 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -25,13 +25,15 @@ + com.google.cloud google-cloud-bigtable - 2.36.0 + 2.38.0 + junit diff --git 
a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 07ee02c1e5..64261c0efb 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -28,7 +28,7 @@ com.google.cloud google-cloud-bigtable - 2.37.1-SNAPSHOT + 2.38.1-SNAPSHOT diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index 5a040c9b8c..807f1cecd9 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -23,14 +23,13 @@ UTF-8 - com.google.cloud libraries-bom - 26.25.0 + 26.37.0 pom import diff --git a/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java b/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java new file mode 100644 index 0000000000..8f3047442b --- /dev/null +++ b/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java @@ -0,0 +1,322 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigtable; + +import static com.google.cloud.bigtable.data.v2.models.Filters.FILTERS; + +import com.google.api.gax.rpc.NotFoundException; +import com.google.api.gax.rpc.PermissionDeniedException; +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient; +import com.google.cloud.bigtable.admin.v2.BigtableTableAdminSettings; +import com.google.cloud.bigtable.admin.v2.models.AuthorizedView; +import com.google.cloud.bigtable.admin.v2.models.CreateAuthorizedViewRequest; +import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest; +import com.google.cloud.bigtable.admin.v2.models.FamilySubsets; +import com.google.cloud.bigtable.admin.v2.models.SubsetView; +import com.google.cloud.bigtable.admin.v2.models.Table; +import com.google.cloud.bigtable.admin.v2.models.UpdateAuthorizedViewRequest; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; +import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId; +import com.google.cloud.bigtable.data.v2.models.Filters.Filter; +import com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.Row; +import com.google.cloud.bigtable.data.v2.models.RowCell; +import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.protobuf.ByteString; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class AuthorizedViewExample { + + private static final String COLUMN_FAMILY = "cf"; + private static final String COLUMN_QUALIFIER_GREETING = "greeting"; + private static final String COLUMN_QUALIFIER_NAME = "name"; + private static final String ROW_KEY_PREFIX = "rowKey"; + private final String tableId; + private final String authorizedViewId; + private final BigtableTableAdminClient adminClient; + private final BigtableDataClient dataClient; + + public static void main(String[] args) throws IOException { + + if (args.length != 2) { + System.out.println("Missing required project 
id or instance id"); + return; + } + String projectId = args[0]; + String instanceId = args[1]; + + AuthorizedViewExample authorizedViewExample = + new AuthorizedViewExample(projectId, instanceId, "test-table", "test-authorized-view"); + authorizedViewExample.run(); + } + + public AuthorizedViewExample( + String projectId, String instanceId, String tableId, String authorizedViewId) + throws IOException { + this.tableId = tableId; + this.authorizedViewId = authorizedViewId; + + // Creates the settings to configure a bigtable data client. + BigtableDataSettings settings = + BigtableDataSettings.newBuilder().setProjectId(projectId).setInstanceId(instanceId).build(); + + // Creates a bigtable data client. + dataClient = BigtableDataClient.create(settings); + + // Creates the settings to configure a bigtable table admin client. + BigtableTableAdminSettings adminSettings = + BigtableTableAdminSettings.newBuilder() + .setProjectId(projectId) + .setInstanceId(instanceId) + .build(); + + // Creates a bigtable table admin client. + adminClient = BigtableTableAdminClient.create(adminSettings); + } + + public void close() { + dataClient.close(); + adminClient.close(); + } + + public void run() { + createTable(); + createAuthorizedView(); + updateAuthorizedView(); + getAuthorizedView(); + listAllAuthorizedViews(); + writeToAuthorizedView(); + readSingleRowFromAuthorizedView(); + readRowsWithFilterFromAuthorizedView(); + deleteAuthorizedView(); + deleteTable(); + close(); + } + + public void createTable() { + // Checks if table exists, creates table if it does not exist. + if (!adminClient.exists(tableId)) { + System.out.println("Table does not exist, creating table: " + tableId); + CreateTableRequest createTableRequest = + CreateTableRequest.of(tableId).addFamily(COLUMN_FAMILY); + Table table = adminClient.createTable(createTableRequest); + System.out.printf("Table: %s created successfully%n", table.getId()); + } + } + + public void deleteTable() { + // Deletes the entire table. + System.out.println("\nDelete table: " + tableId); + try { + adminClient.deleteTable(tableId); + System.out.printf("Table: %s deleted successfully%n", tableId); + } catch (NotFoundException e) { + System.err.println("Failed to delete a non-existent table: " + e.getMessage()); + } + } + + /** + * Demonstrates how to create an authorized view under a table with the specified configuration. + */ + public void createAuthorizedView() { + // Checks if the authorized view exists, creates it if it does not exist. + try { + adminClient.getAuthorizedView(tableId, authorizedViewId); + } catch (NotFoundException exception) { + System.out.printf("%nCreating authorized view %s in table %s%n", authorizedViewId, tableId); + // [START bigtable_create_authorized_view] + try { + CreateAuthorizedViewRequest request = + CreateAuthorizedViewRequest.of(tableId, authorizedViewId) + .setAuthorizedViewType( + SubsetView.create() + .addRowPrefix("") + .setFamilySubsets( + COLUMN_FAMILY, + FamilySubsets.create().addQualifierPrefix(COLUMN_QUALIFIER_NAME))); + AuthorizedView authorizedView = adminClient.createAuthorizedView(request); + System.out.printf("AuthorizedView: %s created successfully%n", authorizedView.getId()); + } catch (NotFoundException e) { + System.err.println( + "Failed to create an authorized view from a non-existent table: " + e.getMessage()); + } + // [END bigtable_create_authorized_view] + } + } + + /** Demonstrates how to modify an authorized view. 
*/ + public void updateAuthorizedView() { + System.out.printf("%nUpdating authorized view %s in table %s%n", authorizedViewId, tableId); + // [START bigtable_update_authorized_view] + try { + // Update to an authorized view permitting everything. + UpdateAuthorizedViewRequest request = + UpdateAuthorizedViewRequest.of(tableId, authorizedViewId) + .setAuthorizedViewType( + SubsetView.create() + .addRowPrefix("") + .setFamilySubsets( + COLUMN_FAMILY, FamilySubsets.create().addQualifierPrefix(""))); + AuthorizedView authorizedView = adminClient.updateAuthorizedView(request); + System.out.printf("AuthorizedView: %s updated successfully%n", authorizedView.getId()); + } catch (NotFoundException e) { + System.err.println("Failed to modify a non-existent authorized view: " + e.getMessage()); + } + // [END bigtable_update_authorized_view] + } + + /** Demonstrates how to get an authorized view's metadata. */ + public AuthorizedView getAuthorizedView() { + System.out.printf("%nGetting authorized view %s in table %s%n", authorizedViewId, tableId); + // [START bigtable_get_authorized_view] + AuthorizedView authorizedView = null; + try { + authorizedView = adminClient.getAuthorizedView(tableId, authorizedViewId); + SubsetView subsetView = (SubsetView) authorizedView.getAuthorizedViewType(); + + for (ByteString rowPrefix : subsetView.getRowPrefixes()) { + System.out.printf("Row Prefix: %s%n", rowPrefix.toStringUtf8()); + } + for (Map.Entry entry : subsetView.getFamilySubsets().entrySet()) { + for (ByteString qualifierPrefix : entry.getValue().getQualifierPrefixes()) { + System.out.printf( + "Column Family: %s, Qualifier Prefix: %s%n", + entry.getKey(), qualifierPrefix.toStringUtf8()); + } + for (ByteString qualifier : entry.getValue().getQualifiers()) { + System.out.printf( + "Column Family: %s, Qualifier: %s%n", entry.getKey(), qualifier.toStringUtf8()); + } + } + } catch (NotFoundException e) { + System.err.println( + "Failed to retrieve metadata from a non-existent authorized view: " + e.getMessage()); + } + // [END bigtable_get_authorized_view] + return authorizedView; + } + + /** Demonstrates how to list all authorized views within a table. */ + public List listAllAuthorizedViews() { + System.out.printf("%nListing authorized views in table %s%n", tableId); + // [START bigtable_list_authorized_views] + List authorizedViewIds = new ArrayList<>(); + try { + authorizedViewIds = adminClient.listAuthorizedViews(tableId); + for (String authorizedViewId : authorizedViewIds) { + System.out.println(authorizedViewId); + } + } catch (NotFoundException e) { + System.err.println( + "Failed to list authorized views from a non-existent table: " + e.getMessage()); + } + // [END bigtable_list_authorized_views] + return authorizedViewIds; + } + + /** Demonstrates how to delete an authorized view. */ + public void deleteAuthorizedView() { + System.out.printf("%nDeleting authorized view %s in table %s%n", authorizedViewId, tableId); + // [START bigtable_delete_authorized_view] + try { + adminClient.deleteAuthorizedView(tableId, authorizedViewId); + System.out.printf("AuthorizedView: %s deleted successfully%n", authorizedViewId); + } catch (NotFoundException e) { + System.err.println("Failed to delete a non-existent authorized view: " + e.getMessage()); + } + // [END bigtable_delete_authorized_view] + } + + /** Demonstrates how to write some rows to an authorized view. 
*/ + public void writeToAuthorizedView() { + // [START bigtable_authorized_view_write_rows] + try { + System.out.println("\nWriting to authorized view"); + String[] names = {"World", "Bigtable", "Java"}; + for (int i = 0; i < names.length; i++) { + String greeting = "Hello " + names[i] + "!"; + RowMutation rowMutation = + RowMutation.create(AuthorizedViewId.of(tableId, authorizedViewId), ROW_KEY_PREFIX + i) + .setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_NAME, names[i]) + .setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_GREETING, greeting); + dataClient.mutateRow(rowMutation); + System.out.println(greeting); + } + } catch (Exception e) { + if (e instanceof NotFoundException) { + System.err.println("Failed to write to non-existent authorized view: " + e.getMessage()); + } else if (e instanceof PermissionDeniedException) { + System.err.println( + "Failed to apply mutations outside of the authorized view: " + e.getMessage()); + } + } + // [END bigtable_authorized_view_write_rows] + } + + /** Demonstrates how to read a single row from an authorized view. */ + public Row readSingleRowFromAuthorizedView() { + // [START bigtable_authorized_view_get_by_key] + try { + System.out.println("\nReading a single row by row key from an authorized view"); + Row row = + dataClient.readRow(AuthorizedViewId.of(tableId, authorizedViewId), ROW_KEY_PREFIX + 0); + System.out.println("Row: " + row.getKey().toStringUtf8()); + for (RowCell cell : row.getCells()) { + System.out.printf( + "Family: %s Qualifier: %s Value: %s%n", + cell.getFamily(), cell.getQualifier().toStringUtf8(), cell.getValue().toStringUtf8()); + } + return row; + } catch (NotFoundException e) { + System.err.println("Failed to read from a non-existent authorized view: " + e.getMessage()); + return null; + } + // [END bigtable_authorized_view_get_by_key] + } + + /** Demonstrates how to read rows from an authorized view with a filter. 
*/ + public List readRowsWithFilterFromAuthorizedView() { + // [START bigtable_authorized_view_scan_with_filter] + try { + // A filter that matches only the most recent cell within each column + Filter filter = FILTERS.limit().cellsPerColumn(1); + System.out.println("\nScanning authorized view with filter"); + Query query = Query.create(AuthorizedViewId.of(tableId, authorizedViewId)).filter(filter); + ServerStream rowStream = dataClient.readRows(query); + List authorizedViewRows = new ArrayList<>(); + for (Row r : rowStream) { + System.out.println("Row Key: " + r.getKey().toStringUtf8()); + authorizedViewRows.add(r); + for (RowCell cell : r.getCells()) { + System.out.printf( + "Family: %s Qualifier: %s Value: %s%n", + cell.getFamily(), cell.getQualifier().toStringUtf8(), cell.getValue().toStringUtf8()); + } + } + return authorizedViewRows; + } catch (NotFoundException e) { + System.err.println("Failed to read a non-existent authorized view: " + e.getMessage()); + return null; + } + // [END bigtable_authorized_view_scan_with_filter] + } +} diff --git a/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java b/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java index d1f546c342..99bc25735d 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java +++ b/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java @@ -156,7 +156,7 @@ public Row readSingleRow() { // [START bigtable_hw_get_by_key] try { System.out.println("\nReading a single row by row key"); - Row row = dataClient.readRow(tableId, ROW_KEY_PREFIX + 0); + Row row = dataClient.readRow(TableId.of(tableId), ROW_KEY_PREFIX + 0); System.out.println("Row: " + row.getKey().toStringUtf8()); for (RowCell cell : row.getCells()) { System.out.printf( diff --git a/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java b/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java index 0bdae948d2..df813ace39 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java +++ b/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java @@ -87,6 +87,11 @@ public void run() { addCluster(); deleteCluster(); deleteInstance(); + close(); + } + + // Close the client + void close() { adminClient.close(); } diff --git a/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java b/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java index 9842658a82..5f804153a1 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java +++ b/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java @@ -108,6 +108,11 @@ public void run() { printModifiedColumnFamily(); deleteColumnFamily(); deleteTable(); + close(); + } + + // Close the client + void close() { adminClient.close(); } diff --git a/samples/snippets/src/test/java/com/example/bigtable/AuthorizedViewExampleTest.java b/samples/snippets/src/test/java/com/example/bigtable/AuthorizedViewExampleTest.java new file mode 100644 index 0000000000..5990d66107 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/bigtable/AuthorizedViewExampleTest.java @@ -0,0 +1,210 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigtable; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.rpc.NotFoundException; +import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient; +import com.google.cloud.bigtable.admin.v2.BigtableTableAdminSettings; +import com.google.cloud.bigtable.admin.v2.models.AuthorizedView; +import com.google.cloud.bigtable.admin.v2.models.CreateAuthorizedViewRequest; +import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest; +import com.google.cloud.bigtable.admin.v2.models.FamilySubsets; +import com.google.cloud.bigtable.admin.v2.models.SubsetView; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; +import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId; +import com.google.cloud.bigtable.data.v2.models.Row; +import com.google.cloud.bigtable.data.v2.models.RowCell; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public class AuthorizedViewExampleTest extends BigtableBaseTest { + + private static final String TABLE_PREFIX = "table"; + private static final String AUTHORIZED_VIEW_PREFIX = "authorized-view"; + private static final String COLUMN_FAMILY = "cf"; + private String tableId; + private String authorizedViewId; + private static BigtableDataClient dataClient; + private static BigtableTableAdminClient adminClient; + private AuthorizedViewExample authorizedViewExample; + + @BeforeClass + public static void beforeClass() throws IOException { + initializeVariables(); + BigtableDataSettings settings = + BigtableDataSettings.newBuilder().setProjectId(projectId).setInstanceId(instanceId).build(); + dataClient = BigtableDataClient.create(settings); + BigtableTableAdminSettings adminSettings = + BigtableTableAdminSettings.newBuilder() + .setProjectId(projectId) + .setInstanceId(instanceId) + .build(); + adminClient = BigtableTableAdminClient.create(adminSettings); + } + + @AfterClass + public static void afterClass() { + garbageCollect(); + dataClient.close(); + adminClient.close(); + } + + @Before + public void setup() throws IOException { + tableId = generateResourceId(TABLE_PREFIX); + authorizedViewId = generateResourceId(AUTHORIZED_VIEW_PREFIX); + authorizedViewExample = + new AuthorizedViewExample(projectId, instanceId, tableId, authorizedViewId); + adminClient.createTable(CreateTableRequest.of(tableId).addFamily(COLUMN_FAMILY)); + adminClient.createAuthorizedView( + CreateAuthorizedViewRequest.of(tableId, authorizedViewId) + .setAuthorizedViewType( + SubsetView.create() + .addRowPrefix("") + .setFamilySubsets( + COLUMN_FAMILY, 
FamilySubsets.create().addQualifierPrefix("")))); + } + + @After + public void after() { + if (adminClient.exists(tableId)) { + // Deleting a table also deletes all the authorized views inside it. + adminClient.deleteTable(tableId); + } + authorizedViewExample.close(); + } + + @Test + public void testRunDoesNotFail() { + authorizedViewExample.run(); + } + + @Test + public void testAuthorizedViewCreateUpdateDelete() throws IOException { + // Creates an authorized view. + String testAuthorizedViewId = generateResourceId(AUTHORIZED_VIEW_PREFIX); + AuthorizedViewExample testAuthorizedViewExample = + new AuthorizedViewExample(projectId, instanceId, tableId, testAuthorizedViewId); + testAuthorizedViewExample.createAuthorizedView(); + AuthorizedView authorizedView = adminClient.getAuthorizedView(tableId, testAuthorizedViewId); + assertEquals(authorizedView.getId(), testAuthorizedViewId); + + // Updates the authorized view. + testAuthorizedViewExample.updateAuthorizedView(); + AuthorizedView updatedAuthorizedView = + adminClient.getAuthorizedView(tableId, testAuthorizedViewId); + assertNotEquals(authorizedView, updatedAuthorizedView); + + // Deletes the authorized view. + testAuthorizedViewExample.deleteAuthorizedView(); + assertThrows( + NotFoundException.class, + () -> adminClient.getAuthorizedView(tableId, testAuthorizedViewId)); + + testAuthorizedViewExample.close(); + } + + @Test + public void testGetAuthorizedView() { + AuthorizedView authorizedView = authorizedViewExample.getAuthorizedView(); + assertNotNull(authorizedView); + assertEquals(authorizedView.getId(), authorizedViewId); + } + + @Test + public void testListAuthorizedView() { + List authorizedViewIds = authorizedViewExample.listAllAuthorizedViews(); + assertEquals(authorizedViewIds.size(), 1); + assertEquals(authorizedViewIds.get(0), authorizedViewId); + } + + @Test + public void testWriteToAuthorizedView() { + assertNull(dataClient.readRow(AuthorizedViewId.of(tableId, authorizedViewId), "rowKey0")); + authorizedViewExample.writeToAuthorizedView(); + assertNotNull(dataClient.readRow(AuthorizedViewId.of(tableId, authorizedViewId), "rowKey0")); + } + + @Test + public void testReadsFromAuthorizedView() { + authorizedViewExample.writeToAuthorizedView(); + + Row actualRow = authorizedViewExample.readSingleRowFromAuthorizedView(); + assertEquals("rowKey0", actualRow.getKey().toStringUtf8()); + assertEquals(2, actualRow.getCells().size()); + assertEquals("Hello World!", actualRow.getCells().get(0).getValue().toStringUtf8()); + assertEquals("World", actualRow.getCells().get(1).getValue().toStringUtf8()); + + List rows = authorizedViewExample.readRowsWithFilterFromAuthorizedView(); + List printedRows = new ArrayList<>(); + for (Row row : rows) { + for (RowCell cell : row.getCells()) { + printedRows.add( + String.format( + "%s_%s_%s:%s", + row.getKey().toStringUtf8(), + cell.getFamily(), + cell.getQualifier().toStringUtf8(), + cell.getValue().toStringUtf8())); + } + } + String[] expectedRows = + new String[] { + "rowKey0_cf_greeting:Hello World!", + "rowKey0_cf_name:World", + "rowKey1_cf_greeting:Hello Bigtable!", + "rowKey1_cf_name:Bigtable", + "rowKey2_cf_greeting:Hello Java!", + "rowKey2_cf_name:Java" + }; + assertEquals(printedRows, Arrays.asList(expectedRows)); + } + + private static void garbageCollect() { + Pattern timestampPattern = Pattern.compile(TABLE_PREFIX + "-([0-9a-f]+)-([0-9a-f]+)"); + for (String tableId : adminClient.listTables()) { + Matcher matcher = timestampPattern.matcher(tableId); + if (!matcher.matches()) { + 
continue; + } + String timestampStr = matcher.group(1); + long timestamp = Long.parseLong(timestampStr, 16); + if (System.currentTimeMillis() - timestamp < TimeUnit.MINUTES.toMillis(10)) { + continue; + } + System.out.println("\nGarbage collecting orphaned table: " + tableId); + adminClient.deleteTable(tableId); + } + } +} diff --git a/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java b/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java index 8845c587ba..5a4475e898 100644 --- a/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java +++ b/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java @@ -55,7 +55,7 @@ public void tearDown() { bout.reset(); } - public static String generateTableId(String prefix) { + public static String generateResourceId(String prefix) { return prefix + "-" + UUID.randomUUID().toString().substring(0, 20); } diff --git a/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java b/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java index dc66b2f9a2..15df1f8fa5 100644 --- a/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java +++ b/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java @@ -78,6 +78,9 @@ public void after() { if (adminClient.exists(instanceId)) { adminClient.deleteInstance(instanceId); } + if (instanceAdmin != null) { + instanceAdmin.close(); + } } @Test diff --git a/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java b/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java index 1eb9dd0ae6..98182187a3 100644 --- a/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java +++ b/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java @@ -29,7 +29,7 @@ public class MobileTimeSeriesBaseTest extends BigtableBaseTest { - public static final String TABLE_ID = generateTableId("mobile-time-series"); + public static final String TABLE_ID = generateResourceId("mobile-time-series"); public static final String COLUMN_FAMILY_NAME_STATS = "stats_summary"; public static final String COLUMN_FAMILY_NAME_PLAN = "cell_plan"; public static final Instant CURRENT_TIME = Instant.now(); diff --git a/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java b/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java index 391764b549..d4fd4de304 100644 --- a/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java +++ b/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java @@ -67,7 +67,7 @@ public static void afterClass() { @Before public void setup() throws IOException { - tableId = generateTableId(TABLE_PREFIX); + tableId = generateResourceId(TABLE_PREFIX); tableAdmin = new TableAdminExample(projectId, instanceId, tableId); adminClient.createTable(CreateTableRequest.of(tableId).addFamily("cf")); } @@ -77,12 +77,15 @@ public void after() { if (adminClient.exists(tableId)) { adminClient.deleteTable(tableId); } + if (tableAdmin != null) { + tableAdmin.close(); + } } @Test public void testCreateAndDeleteTable() throws IOException { // Creates a table. 
-    String testTable = generateTableId(TABLE_PREFIX);
+    String testTable = generateResourceId(TABLE_PREFIX);
     TableAdminExample testTableAdmin = new TableAdminExample(projectId, instanceId, testTable);
     testTableAdmin.createTable();
     assertTrue(adminClient.exists(testTable));
diff --git a/test-proxy/pom.xml b/test-proxy/pom.xml
index a7a0417369..d18e57f64b 100644
--- a/test-proxy/pom.xml
+++ b/test-proxy/pom.xml
@@ -12,11 +12,11 @@
   google-cloud-bigtable-parent
   com.google.cloud
-  2.37.1-SNAPSHOT
+  2.38.1-SNAPSHOT
-  2.37.1-SNAPSHOT
+  2.38.1-SNAPSHOT
diff --git a/versions.txt b/versions.txt
index d5c1e3d879..a63166148c 100644
--- a/versions.txt
+++ b/versions.txt
@@ -1,10 +1,10 @@
 # Format:
 # module:released-version:current-version
-google-cloud-bigtable:2.37.0:2.37.1-SNAPSHOT
-grpc-google-cloud-bigtable-admin-v2:2.37.0:2.37.1-SNAPSHOT
-grpc-google-cloud-bigtable-v2:2.37.0:2.37.1-SNAPSHOT
-proto-google-cloud-bigtable-admin-v2:2.37.0:2.37.1-SNAPSHOT
-proto-google-cloud-bigtable-v2:2.37.0:2.37.1-SNAPSHOT
-google-cloud-bigtable-emulator:0.174.0:0.174.1-SNAPSHOT
-google-cloud-bigtable-emulator-core:0.174.0:0.174.1-SNAPSHOT
+google-cloud-bigtable:2.38.0:2.38.1-SNAPSHOT
+grpc-google-cloud-bigtable-admin-v2:2.38.0:2.38.1-SNAPSHOT
+grpc-google-cloud-bigtable-v2:2.38.0:2.38.1-SNAPSHOT
+proto-google-cloud-bigtable-admin-v2:2.38.0:2.38.1-SNAPSHOT
+proto-google-cloud-bigtable-v2:2.38.0:2.38.1-SNAPSHOT
+google-cloud-bigtable-emulator:0.175.0:0.175.1-SNAPSHOT
+google-cloud-bigtable-emulator-core:0.175.0:0.175.1-SNAPSHOT