        <groupId>com.google.truth</groupId>
        <artifactId>truth</artifactId>
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java
index a6aea08983..8a0f3cab40 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java
@@ -20,6 +20,6 @@
@InternalApi("For internal use only")
public final class Version {
// {x-version-update-start:google-cloud-bigtable:current}
- public static String VERSION = "2.37.1-SNAPSHOT";
+ public static String VERSION = "2.38.1-SNAPSHOT";
// {x-version-update-end}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/AppProfile.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/AppProfile.java
index 2dd75dd5ad..bd7a534640 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/AppProfile.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/AppProfile.java
@@ -16,6 +16,7 @@
package com.google.cloud.bigtable.admin.v2.models;
import com.google.api.core.InternalApi;
+import com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly;
import com.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny;
import com.google.bigtable.admin.v2.AppProfile.Priority;
import com.google.bigtable.admin.v2.AppProfile.StandardIsolation;
@@ -81,6 +82,8 @@ public RoutingPolicy getPolicy() {
public IsolationPolicy getIsolationPolicy() {
if (proto.hasStandardIsolation()) {
return new StandardIsolationPolicy(proto.getStandardIsolation());
+ } else if (proto.hasDataBoostIsolationReadOnly()) {
+ return new DataBoostIsolationReadOnlyPolicy(proto.getDataBoostIsolationReadOnly());
} else {
// Should never happen because the constructor verifies that one must exist.
throw new IllegalStateException();
@@ -409,4 +412,105 @@ public int hashCode() {
return Objects.hashCode(proto);
}
}
+
+ /** Compute Billing Owner specifies how usage should be accounted when using Data Boost. */
+ public static enum ComputeBillingOwner {
+ UNSPECIFIED(DataBoostIsolationReadOnly.ComputeBillingOwner.COMPUTE_BILLING_OWNER_UNSPECIFIED),
+ HOST_PAYS(DataBoostIsolationReadOnly.ComputeBillingOwner.HOST_PAYS),
+ UNRECOGNIZED(DataBoostIsolationReadOnly.ComputeBillingOwner.UNRECOGNIZED);
+
+ private final com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .ComputeBillingOwner
+ proto;
+
+ /**
+ * Wraps the protobuf. This method is considered an internal implementation detail and not meant
+ * to be used by applications.
+ */
+ @InternalApi
+ public static ComputeBillingOwner fromProto(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ proto) {
+ Preconditions.checkNotNull(proto);
+
+ for (ComputeBillingOwner owner : values()) {
+ if (owner.proto.equals(proto)) {
+ return owner;
+ }
+ }
+
+ return UNRECOGNIZED;
+ }
+
+ /**
+ * Creates the request protobuf. This method is considered an internal implementation detail and
+ * not meant to be used by applications.
+ */
+ @InternalApi
+ public DataBoostIsolationReadOnly.ComputeBillingOwner toProto() {
+ return proto;
+ }
+
+ ComputeBillingOwner(DataBoostIsolationReadOnly.ComputeBillingOwner proto) {
+ this.proto = proto;
+ }
+ }
+
+ /**
+ * A Data Boost Read Only {@link IsolationPolicy} for running high-throughput read traffic on your
+ * Bigtable data without affecting application traffic. Data Boost App Profile needs to be created
+ * with a ComputeBillingOwner which specifies how usage should be accounted when using Data Boost.
+ */
+ public static class DataBoostIsolationReadOnlyPolicy implements IsolationPolicy {
+ private final DataBoostIsolationReadOnly proto;
+
+ DataBoostIsolationReadOnlyPolicy(DataBoostIsolationReadOnly proto) {
+ this.proto = proto;
+ }
+
+ /**
+ * Creates a new instance of {@link DataBoostIsolationReadOnlyPolicy} with specified {@link
+ * ComputeBillingOwner}.
+ */
+ public static DataBoostIsolationReadOnlyPolicy of(ComputeBillingOwner billingOwner) {
+ return new DataBoostIsolationReadOnlyPolicy(
+ DataBoostIsolationReadOnly.newBuilder()
+ .setComputeBillingOwner(billingOwner.toProto())
+ .build());
+ }
+
+ /**
+ * Gets the {@link ComputeBillingOwner} on the current {@link DataBoostIsolationReadOnlyPolicy}
+ * instance.
+ */
+ public ComputeBillingOwner getComputeBillingOwner() {
+ return ComputeBillingOwner.fromProto(proto.getComputeBillingOwner());
+ }
+
+ /**
+ * Creates the request protobuf. This method is considered an internal implementation detail and
+ * not meant to be used by applications.
+ */
+ @InternalApi
+ public DataBoostIsolationReadOnly toProto() {
+ return proto;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ DataBoostIsolationReadOnlyPolicy that = (DataBoostIsolationReadOnlyPolicy) o;
+ return Objects.equal(proto, that.proto);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(proto);
+ }
+ }
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequest.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequest.java
index b3159c3146..2ad236c07b 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequest.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequest.java
@@ -97,11 +97,15 @@ public CreateAppProfileRequest setRoutingPolicy(RoutingPolicy routingPolicy) {
/** Sets the isolation policy for all read/write requests that use this app profile. */
public CreateAppProfileRequest setIsolationPolicy(IsolationPolicy isolationPolicy) {
Preconditions.checkNotNull(isolationPolicy);
-
if (isolationPolicy instanceof StandardIsolationPolicy) {
proto
.getAppProfileBuilder()
.setStandardIsolation(((StandardIsolationPolicy) isolationPolicy).toProto());
+ } else if (isolationPolicy instanceof AppProfile.DataBoostIsolationReadOnlyPolicy) {
+ proto
+ .getAppProfileBuilder()
+ .setDataBoostIsolationReadOnly(
+ ((AppProfile.DataBoostIsolationReadOnlyPolicy) isolationPolicy).toProto());
} else {
throw new IllegalArgumentException("Unknown policy type: " + isolationPolicy);
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequest.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequest.java
index b9a45a6f78..b5e14f4f2a 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequest.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequest.java
@@ -17,6 +17,7 @@
import com.google.api.core.InternalApi;
import com.google.cloud.bigtable.admin.v2.internal.NameUtil;
+import com.google.cloud.bigtable.admin.v2.models.AppProfile.DataBoostIsolationReadOnlyPolicy;
import com.google.cloud.bigtable.admin.v2.models.AppProfile.IsolationPolicy;
import com.google.cloud.bigtable.admin.v2.models.AppProfile.MultiClusterRoutingPolicy;
import com.google.cloud.bigtable.admin.v2.models.AppProfile.RoutingPolicy;
@@ -132,6 +133,13 @@ public UpdateAppProfileRequest setIsolationPolicy(@Nonnull IsolationPolicy isola
.getAppProfileBuilder()
.setStandardIsolation(((StandardIsolationPolicy) isolationPolicy).toProto());
updateFieldMask(com.google.bigtable.admin.v2.AppProfile.STANDARD_ISOLATION_FIELD_NUMBER);
+ } else if (isolationPolicy instanceof DataBoostIsolationReadOnlyPolicy) {
+ proto
+ .getAppProfileBuilder()
+ .setDataBoostIsolationReadOnly(
+ ((DataBoostIsolationReadOnlyPolicy) isolationPolicy).toProto());
+ updateFieldMask(
+ com.google.bigtable.admin.v2.AppProfile.DATA_BOOST_ISOLATION_READ_ONLY_FIELD_NUMBER);
} else {
throw new IllegalArgumentException("Unknown policy type: " + isolationPolicy);
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java
index c35500a189..9b2f2e345f 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java
@@ -19,7 +19,10 @@
import com.google.api.gax.core.BackgroundResource;
import com.google.api.gax.rpc.ClientContext;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
+import io.opentelemetry.api.OpenTelemetry;
import java.io.IOException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
import javax.annotation.Nonnull;
/**
@@ -62,8 +65,12 @@
*/
@BetaApi("This feature is currently experimental and can change in the future")
public final class BigtableDataClientFactory implements AutoCloseable {
+
+ private static final Logger logger = Logger.getLogger(BigtableDataClientFactory.class.getName());
+
private final BigtableDataSettings defaultSettings;
private final ClientContext sharedClientContext;
+ private final OpenTelemetry openTelemetry;
/**
* Create a instance of this factory.
@@ -75,13 +82,28 @@ public static BigtableDataClientFactory create(BigtableDataSettings defaultSetti
throws IOException {
ClientContext sharedClientContext =
EnhancedBigtableStub.createClientContext(defaultSettings.getStubSettings());
- return new BigtableDataClientFactory(sharedClientContext, defaultSettings);
+ OpenTelemetry openTelemetry = null;
+ try {
+ // We don't want client side metrics to crash the client, so catch any exception when getting
+ // the OTEL instance and log the exception instead.
+ openTelemetry =
+ EnhancedBigtableStub.getOpenTelemetry(
+ defaultSettings.getProjectId(),
+ defaultSettings.getMetricsProvider(),
+ sharedClientContext.getCredentials());
+ } catch (Throwable t) {
+ logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t);
+ }
+ return new BigtableDataClientFactory(sharedClientContext, defaultSettings, openTelemetry);
}
private BigtableDataClientFactory(
- ClientContext sharedClientContext, BigtableDataSettings defaultSettings) {
+ ClientContext sharedClientContext,
+ BigtableDataSettings defaultSettings,
+ OpenTelemetry openTelemetry) {
this.sharedClientContext = sharedClientContext;
this.defaultSettings = defaultSettings;
+ this.openTelemetry = openTelemetry;
}
/**
@@ -112,7 +134,7 @@ public BigtableDataClient createDefault() {
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
- defaultSettings.getStubSettings()))
+ defaultSettings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(defaultSettings, clientContext);
@@ -140,7 +162,8 @@ public BigtableDataClient createForAppProfile(@Nonnull String appProfileId) thro
sharedClientContext
.toBuilder()
.setTracerFactory(
- EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings()))
+ EnhancedBigtableStub.createBigtableTracerFactory(
+ settings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(settings, clientContext);
}
@@ -168,7 +191,8 @@ public BigtableDataClient createForInstance(@Nonnull String projectId, @Nonnull
sharedClientContext
.toBuilder()
.setTracerFactory(
- EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings()))
+ EnhancedBigtableStub.createBigtableTracerFactory(
+ settings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(settings, clientContext);
@@ -197,7 +221,8 @@ public BigtableDataClient createForInstance(
sharedClientContext
.toBuilder()
.setTracerFactory(
- EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings()))
+ EnhancedBigtableStub.createBigtableTracerFactory(
+ settings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(settings, clientContext);
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java
index 701a5e8e49..928159aa6d 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java
@@ -25,19 +25,16 @@
import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider;
import com.google.api.gax.rpc.UnaryCallSettings;
import com.google.auth.Credentials;
-import com.google.auth.oauth2.GoogleCredentials;
import com.google.cloud.bigtable.data.v2.models.Query;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.stub.BigtableBatchingCallSettings;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
-import com.google.cloud.bigtable.stats.BigtableStackdriverStatsExporter;
-import com.google.cloud.bigtable.stats.BuiltinViews;
+import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider;
import com.google.common.base.MoreObjects;
import com.google.common.base.Strings;
import io.grpc.ManagedChannelBuilder;
import java.io.IOException;
import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
@@ -77,7 +74,10 @@ public final class BigtableDataSettings {
private static final Logger LOGGER = Logger.getLogger(BigtableDataSettings.class.getName());
private static final String BIGTABLE_EMULATOR_HOST_ENV_VAR = "BIGTABLE_EMULATOR_HOST";
- private static final AtomicBoolean BUILTIN_METRICS_REGISTERED = new AtomicBoolean(false);
+ // This is the legacy credential override used in the deprecated enableBuiltinMetrics method to
+ // override the default credentials set on the Bigtable client. Keeping it for backward
+ // compatibility.
+ @Deprecated @Nullable private static Credentials legacyMetricCredentialOverride;
private final EnhancedBigtableStubSettings stubSettings;
@@ -197,23 +197,34 @@ public static void enableGfeOpenCensusStats() {
com.google.cloud.bigtable.data.v2.stub.metrics.RpcViews.registerBigtableClientGfeViews();
}
- /** Register built in metrics. */
- public static void enableBuiltinMetrics() throws IOException {
- if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) {
- BuiltinViews.registerBigtableBuiltinViews();
- BigtableStackdriverStatsExporter.register(GoogleCredentials.getApplicationDefault());
- }
- }
+ /**
+ * Register built in metrics.
+ *
+ * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default
+ * now. Please refer to {@link
+ * BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)} on how to enable or
+ * disable built-in metrics.
+ */
+ @Deprecated
+ public static void enableBuiltinMetrics() throws IOException {}
/**
* Register built in metrics with credentials. The credentials need to have metric write access
* for all the projects you're publishing to.
+ *
+ * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default
+ * now. Please refer {@link BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}
+ * on how to enable or disable built-in metrics.
*/
+ @Deprecated
public static void enableBuiltinMetrics(Credentials credentials) throws IOException {
- if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) {
- BuiltinViews.registerBigtableBuiltinViews();
- BigtableStackdriverStatsExporter.register(credentials);
- }
+ BigtableDataSettings.legacyMetricCredentialOverride = credentials;
+ }
+
+ /** Get the metrics credentials if it's set by {@link #enableBuiltinMetrics(Credentials)}. */
+ @InternalApi
+ public static Credentials getMetricsCredentials() {
+ return legacyMetricCredentialOverride;
}
/** Returns the target project id. */
@@ -278,6 +289,11 @@ public boolean isBulkMutationFlowControlEnabled() {
return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled();
}
+ /** Gets the {@link MetricsProvider}. * */
+ public MetricsProvider getMetricsProvider() {
+ return stubSettings.getMetricsProvider();
+ }
+
/** Returns the underlying RPC settings. */
public EnhancedBigtableStubSettings getStubSettings() {
return stubSettings;
@@ -527,6 +543,30 @@ public boolean isBulkMutationFlowControlEnabled() {
return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled();
}
+    /**
+     * Sets the {@link MetricsProvider}.
+     *
+     * <p>By default, this is set to {@link
+     * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will
+     * collect and export client side metrics.
+     *
+     * <p>To disable client side metrics, set it to {@link
+     * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}.
+     *
+     * <p>To use a custom OpenTelemetry instance, refer to {@link
+     * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to
+     * set it up.
+     */
+ public Builder setMetricsProvider(MetricsProvider metricsProvider) {
+ stubSettings.setMetricsProvider(metricsProvider);
+ return this;
+ }
+
+ /** Gets the {@link MetricsProvider}. */
+ public MetricsProvider getMetricsProvider() {
+ return stubSettings.getMetricsProvider();
+ }
+
/**
* Returns the underlying settings for making RPC calls. The settings should be changed with
* care.
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
index ec15c4131a..57d9748cca 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
@@ -15,6 +15,11 @@
*/
package com.google.cloud.bigtable.data.v2.stub;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY;
+
import com.google.api.core.ApiFunction;
import com.google.api.core.BetaApi;
import com.google.api.core.InternalApi;
@@ -68,6 +73,7 @@
import com.google.bigtable.v2.RowRange;
import com.google.bigtable.v2.SampleRowKeysResponse;
import com.google.cloud.bigtable.Version;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.internal.JwtCredentialsWithAudience;
import com.google.cloud.bigtable.data.v2.internal.NameUtil;
import com.google.cloud.bigtable.data.v2.internal.RequestContext;
@@ -97,8 +103,12 @@
import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerUnaryCallable;
import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTracerFactory;
import com.google.cloud.bigtable.data.v2.stub.metrics.CompositeTracerFactory;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
+import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.metrics.ErrorCountPerConnectionMetricTracker;
+import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsTracerFactory;
+import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants;
import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersServerStreamingCallable;
import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersUnaryCallable;
@@ -130,6 +140,8 @@
import io.opencensus.tags.TagValue;
import io.opencensus.tags.Tagger;
import io.opencensus.tags.Tags;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
@@ -137,6 +149,8 @@
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import java.util.logging.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
@@ -154,6 +168,9 @@
*/
@InternalApi
public class EnhancedBigtableStub implements AutoCloseable {
+
+ private static final Logger logger = Logger.getLogger(EnhancedBigtableStub.class.getName());
+
private static final String CLIENT_NAME = "Bigtable";
private static final long FLOW_CONTROL_ADJUSTING_INTERVAL_MS = TimeUnit.SECONDS.toMillis(20);
private final EnhancedBigtableStubSettings settings;
@@ -185,10 +202,25 @@ public class EnhancedBigtableStub implements AutoCloseable {
public static EnhancedBigtableStub create(EnhancedBigtableStubSettings settings)
throws IOException {
- settings = settings.toBuilder().setTracerFactory(createBigtableTracerFactory(settings)).build();
ClientContext clientContext = createClientContext(settings);
-
- return new EnhancedBigtableStub(settings, clientContext);
+ OpenTelemetry openTelemetry = null;
+ try {
+ // We don't want client side metrics to crash the client, so catch any exception when getting
+ // the OTEL instance and log the exception instead.
+ openTelemetry =
+ getOpenTelemetry(
+ settings.getProjectId(),
+ settings.getMetricsProvider(),
+ clientContext.getCredentials());
+ } catch (Throwable t) {
+ logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t);
+ }
+ ClientContext contextWithTracer =
+ clientContext
+ .toBuilder()
+ .setTracerFactory(createBigtableTracerFactory(settings, openTelemetry))
+ .build();
+ return new EnhancedBigtableStub(settings, contextWithTracer);
}
public static EnhancedBigtableStub createWithClientContext(
@@ -207,15 +239,33 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set
// workaround JWT audience issues
patchCredentials(builder);
+ // Fix the credentials so that they can be shared
+ Credentials credentials = null;
+ if (builder.getCredentialsProvider() != null) {
+ credentials = builder.getCredentialsProvider().getCredentials();
+ }
+ builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials));
+
InstantiatingGrpcChannelProvider.Builder transportProvider =
builder.getTransportChannelProvider() instanceof InstantiatingGrpcChannelProvider
? ((InstantiatingGrpcChannelProvider) builder.getTransportChannelProvider()).toBuilder()
: null;
+ OpenTelemetry openTelemetry = null;
+ try {
+ // We don't want client side metrics to crash the client, so catch any exception when getting
+ // the OTEL instance and log the exception instead.
+ openTelemetry =
+ getOpenTelemetry(settings.getProjectId(), settings.getMetricsProvider(), credentials);
+ } catch (Throwable t) {
+ logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t);
+ }
ErrorCountPerConnectionMetricTracker errorCountPerConnectionMetricTracker;
- if (transportProvider != null) {
+ // Skip setting up ErrorCountPerConnectionMetricTracker if openTelemetry is null
+ if (openTelemetry != null && transportProvider != null) {
errorCountPerConnectionMetricTracker =
- new ErrorCountPerConnectionMetricTracker(createBuiltinAttributes(builder));
+ new ErrorCountPerConnectionMetricTracker(
+ openTelemetry, createBuiltinAttributes(settings));
       ApiFunction<ManagedChannelBuilder, ManagedChannelBuilder> oldChannelConfigurator =
transportProvider.getChannelConfigurator();
transportProvider.setChannelConfigurator(
@@ -237,12 +287,6 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set
// Inject channel priming
if (settings.isRefreshingChannel()) {
- // Fix the credentials so that they can be shared
- Credentials credentials = null;
- if (builder.getCredentialsProvider() != null) {
- credentials = builder.getCredentialsProvider().getCredentials();
- }
- builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials));
if (transportProvider != null) {
transportProvider.setChannelPrimer(
@@ -267,13 +311,19 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set
}
public static ApiTracerFactory createBigtableTracerFactory(
- EnhancedBigtableStubSettings settings) {
- return createBigtableTracerFactory(settings, Tags.getTagger(), Stats.getStatsRecorder());
+ EnhancedBigtableStubSettings settings, @Nullable OpenTelemetry openTelemetry)
+ throws IOException {
+ return createBigtableTracerFactory(
+ settings, Tags.getTagger(), Stats.getStatsRecorder(), openTelemetry);
}
@VisibleForTesting
public static ApiTracerFactory createBigtableTracerFactory(
- EnhancedBigtableStubSettings settings, Tagger tagger, StatsRecorder stats) {
+ EnhancedBigtableStubSettings settings,
+ Tagger tagger,
+ StatsRecorder stats,
+ @Nullable OpenTelemetry openTelemetry)
+ throws IOException {
String projectId = settings.getProjectId();
String instanceId = settings.getInstanceId();
String appProfileId = settings.getAppProfileId();
@@ -284,10 +334,10 @@ public static ApiTracerFactory createBigtableTracerFactory(
.put(RpcMeasureConstants.BIGTABLE_INSTANCE_ID, TagValue.create(instanceId))
.put(RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID, TagValue.create(appProfileId))
.build();
-    ImmutableMap<String, String> builtinAttributes = createBuiltinAttributes(settings.toBuilder());
- return new CompositeTracerFactory(
- ImmutableList.of(
+    ImmutableList.Builder<ApiTracerFactory> tracerFactories = ImmutableList.builder();
+ tracerFactories
+ .add(
// Add OpenCensus Tracing
new OpencensusTracerFactory(
                 ImmutableMap.<String, String>builder()
@@ -299,22 +349,52 @@ public static ApiTracerFactory createBigtableTracerFactory(
.put("gax", GaxGrpcProperties.getGaxGrpcVersion())
.put("grpc", GaxGrpcProperties.getGrpcVersion())
.put("gapic", Version.VERSION)
- .build()),
- // Add OpenCensus Metrics
- MetricsTracerFactory.create(tagger, stats, attributes),
- BuiltinMetricsTracerFactory.create(builtinAttributes),
- // Add user configured tracer
- settings.getTracerFactory()));
+ .build()))
+ // Add OpenCensus Metrics
+ .add(MetricsTracerFactory.create(tagger, stats, attributes))
+ // Add user configured tracer
+ .add(settings.getTracerFactory());
+ BuiltinMetricsTracerFactory builtinMetricsTracerFactory =
+ openTelemetry != null
+ ? BuiltinMetricsTracerFactory.create(openTelemetry, createBuiltinAttributes(settings))
+ : null;
+ if (builtinMetricsTracerFactory != null) {
+ tracerFactories.add(builtinMetricsTracerFactory);
+ }
+ return new CompositeTracerFactory(tracerFactories.build());
+ }
+
+ @Nullable
+ public static OpenTelemetry getOpenTelemetry(
+ String projectId, MetricsProvider metricsProvider, @Nullable Credentials defaultCredentials)
+ throws IOException {
+ if (metricsProvider instanceof CustomOpenTelemetryMetricsProvider) {
+ CustomOpenTelemetryMetricsProvider customMetricsProvider =
+ (CustomOpenTelemetryMetricsProvider) metricsProvider;
+ return customMetricsProvider.getOpenTelemetry();
+ } else if (metricsProvider instanceof DefaultMetricsProvider) {
+ Credentials credentials =
+ BigtableDataSettings.getMetricsCredentials() != null
+ ? BigtableDataSettings.getMetricsCredentials()
+ : defaultCredentials;
+ DefaultMetricsProvider defaultMetricsProvider = (DefaultMetricsProvider) metricsProvider;
+ return defaultMetricsProvider.getOpenTelemetry(projectId, credentials);
+ } else if (metricsProvider instanceof NoopMetricsProvider) {
+ return null;
+ }
+ throw new IOException("Invalid MetricsProvider type " + metricsProvider);
}
-  private static ImmutableMap<String, String> createBuiltinAttributes(
- EnhancedBigtableStubSettings.Builder builder) {
-    return ImmutableMap.<String, String>builder()
- .put("project_id", builder.getProjectId())
- .put("instance", builder.getInstanceId())
- .put("app_profile", builder.getAppProfileId())
- .put("client_name", "bigtable-java/" + Version.VERSION)
- .build();
+ private static Attributes createBuiltinAttributes(EnhancedBigtableStubSettings settings) {
+ return Attributes.of(
+ BIGTABLE_PROJECT_ID_KEY,
+ settings.getProjectId(),
+ INSTANCE_ID_KEY,
+ settings.getInstanceId(),
+ APP_PROFILE_KEY,
+ settings.getAppProfileId(),
+ CLIENT_NAME_KEY,
+ "bigtable-java/" + Version.VERSION);
}
private static void patchCredentials(EnhancedBigtableStubSettings.Builder settings)
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
index 9a5027c740..f07a8fb7fc 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
@@ -44,6 +44,8 @@
import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider;
+import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor;
import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsBatchingDescriptor;
import com.google.common.base.MoreObjects;
@@ -229,6 +231,8 @@ public class EnhancedBigtableStubSettings extends StubSettings getJwtAudienceMapping() {
return jwtAudienceMapping;
}
+ public MetricsProvider getMetricsProvider() {
+ return metricsProvider;
+ }
+
/**
* Gets if routing cookie is enabled. If true, client will retry a request with extra metadata
* server sent back.
@@ -636,6 +645,8 @@ public static class Builder extends StubSettings.Builder jwtAudienceMapping) {
return this;
}
+    /**
+     * Sets the {@link MetricsProvider}.
+     *
+     * <p>By default, this is set to {@link
+     * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will
+     * collect and export client side metrics.
+     *
+     * <p>To disable client side metrics, set it to {@link
+     * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}.
+     *
+     * <p>To use a custom OpenTelemetry instance, refer to {@link
+     * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to
+     * set it up.
+     */
+ public Builder setMetricsProvider(MetricsProvider metricsProvider) {
+ this.metricsProvider = Preconditions.checkNotNull(metricsProvider);
+ return this;
+ }
+
+ /** Gets the {@link MetricsProvider}. */
+ public MetricsProvider getMetricsProvider() {
+ return this.metricsProvider;
+ }
+
@InternalApi("Used for internal testing")
public Map getJwtAudienceMapping() {
return jwtAudienceMapping;
@@ -1028,6 +1067,11 @@ public EnhancedBigtableStubSettings build() {
featureFlags.setRoutingCookie(this.getEnableRoutingCookie());
featureFlags.setRetryInfo(this.getEnableRetryInfo());
+ // client_Side_metrics_enabled feature flag is only set when a user is running with a
+ // DefaultMetricsProvider. This may cause false negatives when a user registered the
+ // metrics on their CustomOpenTelemetryMetricsProvider.
+ featureFlags.setClientSideMetricsEnabled(
+ this.getMetricsProvider() instanceof DefaultMetricsProvider);
// Serialize the web64 encode the bigtable feature flags
ByteArrayOutputStream boas = new ByteArrayOutputStream();
@@ -1080,6 +1124,7 @@ public String toString() {
generateInitialChangeStreamPartitionsSettings)
.add("readChangeStreamSettings", readChangeStreamSettings)
.add("pingAndWarmSettings", pingAndWarmSettings)
+ .add("metricsProvider", metricsProvider)
.add("parent", super.toString())
.toString();
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java
index 6208fce89e..97cc2f73ec 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java
@@ -86,7 +86,7 @@ public void call(
stopwatch.stop();
if (context.getTracer() instanceof BigtableTracer) {
((BigtableTracer) context.getTracer())
- .batchRequestThrottled(stopwatch.elapsed(TimeUnit.MILLISECONDS));
+ .batchRequestThrottled(stopwatch.elapsed(TimeUnit.NANOSECONDS));
}
RateLimitingResponseObserver innerObserver =
new RateLimitingResponseObserver(limiter, lastQpsChangeTime, responseObserver);
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java
new file mode 100644
index 0000000000..81473ae4d4
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME;
+
+import com.google.api.MonitoredResource;
+import com.google.api.core.ApiFuture;
+import com.google.api.core.ApiFutureCallback;
+import com.google.api.core.ApiFutures;
+import com.google.api.core.InternalApi;
+import com.google.api.gax.core.CredentialsProvider;
+import com.google.api.gax.core.FixedCredentialsProvider;
+import com.google.api.gax.core.NoCredentialsProvider;
+import com.google.auth.Credentials;
+import com.google.cloud.monitoring.v3.MetricServiceClient;
+import com.google.cloud.monitoring.v3.MetricServiceSettings;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.monitoring.v3.CreateTimeSeriesRequest;
+import com.google.monitoring.v3.ProjectName;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.protobuf.Empty;
+import io.opentelemetry.sdk.common.CompletableResultCode;
+import io.opentelemetry.sdk.metrics.InstrumentType;
+import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+import org.threeten.bp.Duration;
+
+/**
+ * Bigtable Cloud Monitoring OpenTelemetry Exporter.
+ *
+ * <p>The exporter will look for all bigtable owned metrics under bigtable.googleapis.com
+ * instrumentation scope and upload it via the Google Cloud Monitoring API.
+ */
+@InternalApi
+public final class BigtableCloudMonitoringExporter implements MetricExporter {
+
+ private static final Logger logger =
+ Logger.getLogger(BigtableCloudMonitoringExporter.class.getName());
+
+ // This system property can be used to override the monitoring endpoint
+ // to a different environment. It's meant for internal testing only.
+ private static final String MONITORING_ENDPOINT =
+ MoreObjects.firstNonNull(
+ System.getProperty("bigtable.test-monitoring-endpoint"),
+ MetricServiceSettings.getDefaultEndpoint());
+
+ private static final String APPLICATION_RESOURCE_PROJECT_ID = "project_id";
+
+ private final MetricServiceClient client;
+
+ private final String bigtableProjectId;
+ private final String taskId;
+
+ // The resource the client application is running on
+ private final MonitoredResource applicationResource;
+
+ private final AtomicBoolean isShutdown = new AtomicBoolean(false);
+
+ private CompletableResultCode lastExportCode;
+
+ private static final ImmutableList<String> BIGTABLE_TABLE_METRICS =
+ ImmutableSet.of(
+ OPERATION_LATENCIES_NAME,
+ ATTEMPT_LATENCIES_NAME,
+ SERVER_LATENCIES_NAME,
+ FIRST_RESPONSE_LATENCIES_NAME,
+ CLIENT_BLOCKING_LATENCIES_NAME,
+ APPLICATION_BLOCKING_LATENCIES_NAME,
+ RETRY_COUNT_NAME,
+ CONNECTIVITY_ERROR_COUNT_NAME)
+ .stream()
+ .map(m -> METER_NAME + m)
+ .collect(ImmutableList.toImmutableList());
+
+ private static final ImmutableList<String> APPLICATION_METRICS =
+ ImmutableSet.of(PER_CONNECTION_ERROR_COUNT_NAME).stream()
+ .map(m -> METER_NAME + m)
+ .collect(ImmutableList.toImmutableList());
+
+ public static BigtableCloudMonitoringExporter create(
+ String projectId, @Nullable Credentials credentials) throws IOException {
+ MetricServiceSettings.Builder settingsBuilder = MetricServiceSettings.newBuilder();
+ CredentialsProvider credentialsProvider =
+ Optional.ofNullable(credentials)
+ .map(FixedCredentialsProvider::create)
+ .orElse(NoCredentialsProvider.create());
+ settingsBuilder.setCredentialsProvider(credentialsProvider);
+ settingsBuilder.setEndpoint(MONITORING_ENDPOINT);
+
+ org.threeten.bp.Duration timeout = Duration.ofMinutes(1);
+ // TODO: createServiceTimeSeries needs special handling if the request failed. Leaving
+ // it as not retried for now.
+ settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetries(timeout);
+
+ // Detect the resource that the client application is running on. For example,
+ // this could be a GCE instance or a GKE pod. Currently, we only support GCE instance and
+ // GKE pod. This method will return null for everything else.
+ MonitoredResource applicationResource = null;
+ try {
+ applicationResource = BigtableExporterUtils.detectResource();
+ } catch (Exception e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to detect resource, will skip exporting application level metrics ",
+ e);
+ }
+
+ return new BigtableCloudMonitoringExporter(
+ projectId,
+ MetricServiceClient.create(settingsBuilder.build()),
+ applicationResource,
+ BigtableExporterUtils.getDefaultTaskValue());
+ }
+
+ @VisibleForTesting
+ BigtableCloudMonitoringExporter(
+ String projectId,
+ MetricServiceClient client,
+ @Nullable MonitoredResource applicationResource,
+ String taskId) {
+ this.client = client;
+ this.taskId = taskId;
+ this.applicationResource = applicationResource;
+ this.bigtableProjectId = projectId;
+ }
+
+ @Override
+ public CompletableResultCode export(Collection<MetricData> collection) {
+ if (isShutdown.get()) {
+ logger.log(Level.WARNING, "Exporter is shutting down");
+ return CompletableResultCode.ofFailure();
+ }
+
+ CompletableResultCode bigtableExportCode = exportBigtableResourceMetrics(collection);
+ CompletableResultCode applicationExportCode = exportApplicationResourceMetrics(collection);
+
+ lastExportCode =
+ CompletableResultCode.ofAll(ImmutableList.of(applicationExportCode, bigtableExportCode));
+
+ return lastExportCode;
+ }
+
+ /** Export metrics associated with a BigtableTable resource. */
+ private CompletableResultCode exportBigtableResourceMetrics(Collection<MetricData> collection) {
+ // Filter bigtable table metrics
+ List<MetricData> bigtableMetricData =
+ collection.stream()
+ .filter(md -> BIGTABLE_TABLE_METRICS.contains(md.getName()))
+ .collect(Collectors.toList());
+
+ // Skips exporting if there's none
+ if (bigtableMetricData.isEmpty()) {
+ return CompletableResultCode.ofSuccess();
+ }
+
+ // Verifies metrics project id are the same as the bigtable project id set on this client
+ if (!bigtableMetricData.stream()
+ .flatMap(metricData -> metricData.getData().getPoints().stream())
+ .allMatch(pd -> bigtableProjectId.equals(BigtableExporterUtils.getProjectId(pd)))) {
+ logger.log(Level.WARNING, "Metric data has different a projectId. Skip exporting.");
+ return CompletableResultCode.ofFailure();
+ }
+
+ List<TimeSeries> bigtableTimeSeries;
+ try {
+ bigtableTimeSeries =
+ BigtableExporterUtils.convertToBigtableTimeSeries(bigtableMetricData, taskId);
+ } catch (Throwable e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to convert bigtable table metric data to cloud monitoring timeseries.",
+ e);
+ return CompletableResultCode.ofFailure();
+ }
+
+ ProjectName projectName = ProjectName.of(bigtableProjectId);
+ CreateTimeSeriesRequest bigtableRequest =
+ CreateTimeSeriesRequest.newBuilder()
+ .setName(projectName.toString())
+ .addAllTimeSeries(bigtableTimeSeries)
+ .build();
+
+ ApiFuture<Empty> future =
+ this.client.createServiceTimeSeriesCallable().futureCall(bigtableRequest);
+
+ CompletableResultCode bigtableExportCode = new CompletableResultCode();
+ ApiFutures.addCallback(
+ future,
+ new ApiFutureCallback<Empty>() {
+ @Override
+ public void onFailure(Throwable throwable) {
+ logger.log(
+ Level.WARNING,
+ "createServiceTimeSeries request failed for bigtable metrics. ",
+ throwable);
+ bigtableExportCode.fail();
+ }
+
+ @Override
+ public void onSuccess(Empty empty) {
+ bigtableExportCode.succeed();
+ }
+ },
+ MoreExecutors.directExecutor());
+
+ return bigtableExportCode;
+ }
+
+ /** Export metrics associated with the resource the Application is running on. */
+ private CompletableResultCode exportApplicationResourceMetrics(
+ Collection<MetricData> collection) {
+ if (applicationResource == null) {
+ return CompletableResultCode.ofSuccess();
+ }
+
+ // Filter application level metrics
+ List<MetricData> metricData =
+ collection.stream()
+ .filter(md -> APPLICATION_METRICS.contains(md.getName()))
+ .collect(Collectors.toList());
+
+ // Skip exporting if there's none
+ if (metricData.isEmpty()) {
+ return CompletableResultCode.ofSuccess();
+ }
+
+ List<TimeSeries> timeSeries;
+ try {
+ timeSeries =
+ BigtableExporterUtils.convertToApplicationResourceTimeSeries(
+ metricData, taskId, applicationResource);
+ } catch (Throwable e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to convert application metric data to cloud monitoring timeseries.",
+ e);
+ return CompletableResultCode.ofFailure();
+ }
+
+ // Construct the request. The project id will be the project id of the detected monitored
+ // resource.
+ ApiFuture<Empty> gceOrGkeFuture;
+ CompletableResultCode exportCode = new CompletableResultCode();
+ try {
+ ProjectName projectName =
+ ProjectName.of(applicationResource.getLabelsOrThrow(APPLICATION_RESOURCE_PROJECT_ID));
+ CreateTimeSeriesRequest request =
+ CreateTimeSeriesRequest.newBuilder()
+ .setName(projectName.toString())
+ .addAllTimeSeries(timeSeries)
+ .build();
+
+ gceOrGkeFuture = this.client.createServiceTimeSeriesCallable().futureCall(request);
+
+ ApiFutures.addCallback(
+ gceOrGkeFuture,
+ new ApiFutureCallback<Empty>() {
+ @Override
+ public void onFailure(Throwable throwable) {
+ logger.log(
+ Level.WARNING,
+ "createServiceTimeSeries request failed for per connection error metrics.",
+ throwable);
+ exportCode.fail();
+ }
+
+ @Override
+ public void onSuccess(Empty empty) {
+ exportCode.succeed();
+ }
+ },
+ MoreExecutors.directExecutor());
+
+ } catch (Exception e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to get projectName for application resource " + applicationResource);
+ return CompletableResultCode.ofFailure();
+ }
+
+ return exportCode;
+ }
+
+ @Override
+ public CompletableResultCode flush() {
+ if (lastExportCode != null) {
+ return lastExportCode;
+ }
+ return CompletableResultCode.ofSuccess();
+ }
+
+ @Override
+ public CompletableResultCode shutdown() {
+ if (!isShutdown.compareAndSet(false, true)) {
+ logger.log(Level.WARNING, "shutdown is called multiple times");
+ return CompletableResultCode.ofSuccess();
+ }
+ CompletableResultCode flushResult = flush();
+ CompletableResultCode shutdownResult = new CompletableResultCode();
+ flushResult.whenComplete(
+ () -> {
+ Throwable throwable = null;
+ try {
+ client.shutdown();
+ } catch (Throwable e) {
+ logger.log(Level.WARNING, "failed to shutdown the monitoring client", e);
+ throwable = e;
+ }
+ if (throwable != null) {
+ shutdownResult.fail();
+ } else {
+ shutdownResult.succeed();
+ }
+ });
+ return CompletableResultCode.ofAll(Arrays.asList(flushResult, shutdownResult));
+ }
+
+ /**
+ * For Google Cloud Monitoring always return CUMULATIVE to keep track of the cumulative value of a
+ * metric over time.
+ */
+ @Override
+ public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) {
+ return AggregationTemporality.CUMULATIVE;
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java
new file mode 100644
index 0000000000..5bf6688e17
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java
@@ -0,0 +1,367 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.api.Distribution.BucketOptions;
+import static com.google.api.Distribution.BucketOptions.Explicit;
+import static com.google.api.MetricDescriptor.MetricKind;
+import static com.google.api.MetricDescriptor.MetricKind.CUMULATIVE;
+import static com.google.api.MetricDescriptor.MetricKind.GAUGE;
+import static com.google.api.MetricDescriptor.MetricKind.UNRECOGNIZED;
+import static com.google.api.MetricDescriptor.ValueType;
+import static com.google.api.MetricDescriptor.ValueType.DISTRIBUTION;
+import static com.google.api.MetricDescriptor.ValueType.DOUBLE;
+import static com.google.api.MetricDescriptor.ValueType.INT64;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
+
+import com.google.api.Distribution;
+import com.google.api.Metric;
+import com.google.api.MonitoredResource;
+import com.google.cloud.opentelemetry.detection.AttributeKeys;
+import com.google.cloud.opentelemetry.detection.DetectedPlatform;
+import com.google.cloud.opentelemetry.detection.GCPPlatformDetector;
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.monitoring.v3.Point;
+import com.google.monitoring.v3.TimeInterval;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.monitoring.v3.TypedValue;
+import com.google.protobuf.util.Timestamps;
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
+import io.opentelemetry.sdk.metrics.data.DoublePointData;
+import io.opentelemetry.sdk.metrics.data.HistogramData;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.LongPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.MetricDataType;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.metrics.data.SumData;
+import java.lang.management.ManagementFactory;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import javax.annotation.Nullable;
+
+/** Utils to convert OpenTelemetry types to Google Cloud Monitoring types. */
+class BigtableExporterUtils {
+
+ private static final Logger logger = Logger.getLogger(BigtableExporterUtils.class.getName());
+
+ private static final String BIGTABLE_RESOURCE_TYPE = "bigtable_client_raw";
+
+ // These metric labels will be promoted to the bigtable_table monitored resource fields
+ private static final Set<AttributeKey<String>> BIGTABLE_PROMOTED_RESOURCE_LABELS =
+ ImmutableSet.of(
+ BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, TABLE_ID_KEY, CLUSTER_ID_KEY, ZONE_ID_KEY);
+
+ private BigtableExporterUtils() {}
+
+ /**
+ * In most cases this should look like java-${UUID}@${hostname}. The hostname will be retrieved
+ * from the jvm name and fallback to the local hostname.
+ */
+ static String getDefaultTaskValue() {
+ // Something like '<pid>@<hostname>'
+ final String jvmName = ManagementFactory.getRuntimeMXBean().getName();
+ // If jvm doesn't have the expected format, fallback to the local hostname
+ if (jvmName.indexOf('@') < 1) {
+ String hostname = "localhost";
+ try {
+ hostname = InetAddress.getLocalHost().getHostName();
+ } catch (UnknownHostException e) {
+ logger.log(Level.INFO, "Unable to get the hostname.", e);
+ }
+ // Generate a random number and use the same format "random_number@hostname".
+ return "java-" + UUID.randomUUID() + "@" + hostname;
+ }
+ return "java-" + UUID.randomUUID() + jvmName;
+ }
+
+ static String getProjectId(PointData pointData) {
+ return pointData.getAttributes().get(BIGTABLE_PROJECT_ID_KEY);
+ }
+
+ static List<TimeSeries> convertToBigtableTimeSeries(List<MetricData> collection, String taskId) {
+ List allTimeSeries = new ArrayList<>();
+
+ for (MetricData metricData : collection) {
+ if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) {
+ // Filter out metric data for instruments that are not part of the bigtable builtin metrics
+ continue;
+ }
+ metricData.getData().getPoints().stream()
+ .map(pointData -> convertPointToBigtableTimeSeries(metricData, pointData, taskId))
+ .forEach(allTimeSeries::add);
+ }
+
+ return allTimeSeries;
+ }
+
+ static List<TimeSeries> convertToApplicationResourceTimeSeries(
+ Collection<MetricData> collection, String taskId, MonitoredResource applicationResource) {
+ Preconditions.checkNotNull(
+ applicationResource,
+ "convert application metrics is called when the supported resource is not detected");
+ List allTimeSeries = new ArrayList<>();
+ for (MetricData metricData : collection) {
+ if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) {
+ // Filter out metric data for instruments that are not part of the bigtable builtin metrics
+ continue;
+ }
+ metricData.getData().getPoints().stream()
+ .map(
+ pointData ->
+ convertPointToApplicationResourceTimeSeries(
+ metricData, pointData, taskId, applicationResource))
+ .forEach(allTimeSeries::add);
+ }
+ return allTimeSeries;
+ }
+
+ @Nullable
+ static MonitoredResource detectResource() {
+ GCPPlatformDetector detector = GCPPlatformDetector.DEFAULT_INSTANCE;
+ DetectedPlatform detectedPlatform = detector.detectPlatform();
+ MonitoredResource monitoredResource = null;
+ try {
+ switch (detectedPlatform.getSupportedPlatform()) {
+ case GOOGLE_COMPUTE_ENGINE:
+ monitoredResource =
+ createGceMonitoredResource(
+ detectedPlatform.getProjectId(), detectedPlatform.getAttributes());
+ break;
+ case GOOGLE_KUBERNETES_ENGINE:
+ monitoredResource =
+ createGkeMonitoredResource(
+ detectedPlatform.getProjectId(), detectedPlatform.getAttributes());
+ break;
+ }
+ } catch (IllegalStateException e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to create monitored resource for " + detectedPlatform.getSupportedPlatform(),
+ e);
+ }
+ return monitoredResource;
+ }
+
+ private static MonitoredResource createGceMonitoredResource(
String projectId, Map<String, String> attributes) {
+ return MonitoredResource.newBuilder()
+ .setType("gce_instance")
+ .putLabels("project_id", projectId)
+ .putLabels("instance_id", getAttribute(attributes, AttributeKeys.GCE_INSTANCE_ID))
+ .putLabels("zone", getAttribute(attributes, AttributeKeys.GCE_AVAILABILITY_ZONE))
+ .build();
+ }
+
+ private static MonitoredResource createGkeMonitoredResource(
String projectId, Map<String, String> attributes) {
+ return MonitoredResource.newBuilder()
+ .setType("k8s_container")
+ .putLabels("project_id", projectId)
+ .putLabels("location", getAttribute(attributes, AttributeKeys.GKE_CLUSTER_LOCATION))
+ .putLabels("cluster_name", getAttribute(attributes, AttributeKeys.GKE_CLUSTER_NAME))
+ .putLabels("namespace_name", MoreObjects.firstNonNull(System.getenv("NAMESPACE"), ""))
+ .putLabels("pod_name", MoreObjects.firstNonNull(System.getenv("HOSTNAME"), ""))
+ .putLabels("container_name", MoreObjects.firstNonNull(System.getenv("CONTAINER_NAME"), ""))
+ .build();
+ }
+
+ private static String getAttribute(Map<String, String> attributes, String key) {
+ String value = attributes.get(key);
+ if (value == null) {
+ throw new IllegalStateException(
+ "Required attribute " + key + " does not exist in the attributes map " + attributes);
+ }
+ return value;
+ }
+
+ private static TimeSeries convertPointToBigtableTimeSeries(
+ MetricData metricData, PointData pointData, String taskId) {
+ TimeSeries.Builder builder =
+ TimeSeries.newBuilder()
+ .setMetricKind(convertMetricKind(metricData))
+ .setValueType(convertValueType(metricData.getType()));
+ Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName());
+
+ Attributes attributes = pointData.getAttributes();
+ MonitoredResource.Builder monitoredResourceBuilder =
+ MonitoredResource.newBuilder().setType(BIGTABLE_RESOURCE_TYPE);
+
+ for (AttributeKey<?> key : attributes.asMap().keySet()) {
+ if (BIGTABLE_PROMOTED_RESOURCE_LABELS.contains(key)) {
+ monitoredResourceBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key)));
+ } else {
+ metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key)));
+ }
+ }
+
+ builder.setResource(monitoredResourceBuilder.build());
+
+ metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId);
+ builder.setMetric(metricBuilder.build());
+
+ TimeInterval timeInterval =
+ TimeInterval.newBuilder()
+ .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos()))
+ .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos()))
+ .build();
+
+ builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval));
+
+ return builder.build();
+ }
+
+ private static TimeSeries convertPointToApplicationResourceTimeSeries(
+ MetricData metricData,
+ PointData pointData,
+ String taskId,
+ MonitoredResource applicationResource) {
+ TimeSeries.Builder builder =
+ TimeSeries.newBuilder()
+ .setMetricKind(convertMetricKind(metricData))
+ .setValueType(convertValueType(metricData.getType()))
+ .setResource(applicationResource);
+
+ Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName());
+
+ Attributes attributes = pointData.getAttributes();
+ for (AttributeKey<?> key : attributes.asMap().keySet()) {
+ metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key)));
+ }
+
+ metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId);
+ builder.setMetric(metricBuilder.build());
+
+ TimeInterval timeInterval =
+ TimeInterval.newBuilder()
+ .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos()))
+ .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos()))
+ .build();
+
+ builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval));
+ return builder.build();
+ }
+
+ private static MetricKind convertMetricKind(MetricData metricData) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ case EXPONENTIAL_HISTOGRAM:
+ return convertHistogramType(metricData.getHistogramData());
+ case LONG_GAUGE:
+ case DOUBLE_GAUGE:
+ return GAUGE;
+ case LONG_SUM:
+ return convertSumDataType(metricData.getLongSumData());
+ case DOUBLE_SUM:
+ return convertSumDataType(metricData.getDoubleSumData());
+ default:
+ return UNRECOGNIZED;
+ }
+ }
+
+ private static MetricKind convertHistogramType(HistogramData histogramData) {
+ if (histogramData.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) {
+ return CUMULATIVE;
+ }
+ return UNRECOGNIZED;
+ }
+
+ private static MetricKind convertSumDataType(SumData<?> sum) {
+ if (!sum.isMonotonic()) {
+ return GAUGE;
+ }
+ if (sum.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) {
+ return CUMULATIVE;
+ }
+ return UNRECOGNIZED;
+ }
+
+ private static ValueType convertValueType(MetricDataType metricDataType) {
+ switch (metricDataType) {
+ case LONG_GAUGE:
+ case LONG_SUM:
+ return INT64;
+ case DOUBLE_GAUGE:
+ case DOUBLE_SUM:
+ return DOUBLE;
+ case HISTOGRAM:
+ case EXPONENTIAL_HISTOGRAM:
+ return DISTRIBUTION;
+ default:
+ return ValueType.UNRECOGNIZED;
+ }
+ }
+
+ private static Point createPoint(
+ MetricDataType type, PointData pointData, TimeInterval timeInterval) {
+ Point.Builder builder = Point.newBuilder().setInterval(timeInterval);
+ switch (type) {
+ case HISTOGRAM:
+ case EXPONENTIAL_HISTOGRAM:
+ return builder
+ .setValue(
+ TypedValue.newBuilder()
+ .setDistributionValue(convertHistogramData((HistogramPointData) pointData))
+ .build())
+ .build();
+ case DOUBLE_GAUGE:
+ case DOUBLE_SUM:
+ return builder
+ .setValue(
+ TypedValue.newBuilder()
+ .setDoubleValue(((DoublePointData) pointData).getValue())
+ .build())
+ .build();
+ case LONG_GAUGE:
+ case LONG_SUM:
+ return builder
+ .setValue(TypedValue.newBuilder().setInt64Value(((LongPointData) pointData).getValue()))
+ .build();
+ default:
+ logger.log(Level.WARNING, "unsupported metric type");
+ return builder.build();
+ }
+ }
+
+ private static Distribution convertHistogramData(HistogramPointData pointData) {
+ return Distribution.newBuilder()
+ .setCount(pointData.getCount())
+ .setMean(pointData.getCount() == 0L ? 0.0D : pointData.getSum() / pointData.getCount())
+ .setBucketOptions(
+ BucketOptions.newBuilder()
+ .setExplicitBuckets(Explicit.newBuilder().addAllBounds(pointData.getBoundaries())))
+ .addAllBucketCounts(pointData.getCounts())
+ .build();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java
index 1cda49934c..3b2242385a 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java
@@ -42,7 +42,7 @@ public void streamCreated(Attributes transportAttrs, Metadata headers) {
@Override
public void outboundMessageSent(int seqNo, long optionalWireSize, long optionalUncompressedSize) {
- tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.MILLISECONDS));
+ tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.NANOSECONDS));
}
static class Factory extends ClientStreamTracer.Factory {
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java
new file mode 100644
index 0000000000..d85300828b
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.api.core.InternalApi;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.sdk.metrics.Aggregation;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.InstrumentType;
+import io.opentelemetry.sdk.metrics.View;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/** Defines Bigtable built-in metrics scope, attributes, metric names and views. */
+@InternalApi
+public class BuiltinMetricsConstants {
+
+ // Metric attribute keys for monitored resource
+ public static final AttributeKey BIGTABLE_PROJECT_ID_KEY =
+ AttributeKey.stringKey("project_id");
+ public static final AttributeKey INSTANCE_ID_KEY = AttributeKey.stringKey("instance");
+ public static final AttributeKey TABLE_ID_KEY = AttributeKey.stringKey("table");
+ public static final AttributeKey CLUSTER_ID_KEY = AttributeKey.stringKey("cluster");
+ public static final AttributeKey ZONE_ID_KEY = AttributeKey.stringKey("zone");
+
+ // Metric attribute keys for labels
+ // We need to access APP_PROFILE_KEY in EnhancedBigtableStubSettings and STREAMING_KEY in
+ // IT tests, so they're public.
+ public static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile");
+ public static final AttributeKey STREAMING_KEY = AttributeKey.booleanKey("streaming");
+ public static final AttributeKey CLIENT_NAME_KEY = AttributeKey.stringKey("client_name");
+ static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method");
+ static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status");
+ static final AttributeKey CLIENT_UID_KEY = AttributeKey.stringKey("client_uid");
+
+ // Metric names
+ public static final String OPERATION_LATENCIES_NAME = "operation_latencies";
+ public static final String ATTEMPT_LATENCIES_NAME = "attempt_latencies";
+ static final String RETRY_COUNT_NAME = "retry_count";
+ static final String CONNECTIVITY_ERROR_COUNT_NAME = "connectivity_error_count";
+ static final String SERVER_LATENCIES_NAME = "server_latencies";
+ static final String FIRST_RESPONSE_LATENCIES_NAME = "first_response_latencies";
+ static final String APPLICATION_BLOCKING_LATENCIES_NAME = "application_latencies";
+ static final String CLIENT_BLOCKING_LATENCIES_NAME = "throttling_latencies";
+ static final String PER_CONNECTION_ERROR_COUNT_NAME = "per_connection_error_count";
+
+ // Buckets under 100,000 are identical to buckets for server side metrics handler_latencies.
+ // Extending client side bucket to up to 3,200,000.
+ private static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM =
+ Aggregation.explicitBucketHistogram(
+ ImmutableList.of(
+ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0,
+ 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0,
+ 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0,
+ 400000.0, 800000.0, 1600000.0, 3200000.0)); // max is 53.3 minutes
+
+ private static final Aggregation AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM =
+ Aggregation.explicitBucketHistogram(
+ ImmutableList.of(
+ 1.0,
+ 2.0,
+ 4.0,
+ 8.0,
+ 16.0,
+ 32.0,
+ 64.0,
+ 125.0,
+ 250.0,
+ 500.0,
+ 1_000.0,
+ 2_000.0,
+ 4_000.0,
+ 8_000.0,
+ 16_000.0,
+ 32_000.0,
+ 64_000.0,
+ 128_000.0,
+ 250_000.0,
+ 500_000.0,
+ 1_000_000.0));
+
+ public static final String METER_NAME = "bigtable.googleapis.com/internal/client/";
+
+ static final Set COMMON_ATTRIBUTES =
+ ImmutableSet.of(
+ BIGTABLE_PROJECT_ID_KEY,
+ INSTANCE_ID_KEY,
+ TABLE_ID_KEY,
+ APP_PROFILE_KEY,
+ CLUSTER_ID_KEY,
+ ZONE_ID_KEY,
+ METHOD_KEY,
+ CLIENT_NAME_KEY);
+
+ static void defineView(
+ ImmutableMap.Builder viewMap,
+ String id,
+ Aggregation aggregation,
+ InstrumentType type,
+ String unit,
+ Set attributes) {
+ InstrumentSelector selector =
+ InstrumentSelector.builder()
+ .setName(id)
+ .setMeterName(METER_NAME)
+ .setType(type)
+ .setUnit(unit)
+ .build();
+ Set attributesFilter =
+ ImmutableSet.builder()
+ .addAll(
+ COMMON_ATTRIBUTES.stream().map(AttributeKey::getKey).collect(Collectors.toSet()))
+ .addAll(attributes.stream().map(AttributeKey::getKey).collect(Collectors.toSet()))
+ .build();
+ View view =
+ View.builder()
+ .setName(METER_NAME + id)
+ .setAggregation(aggregation)
+ .setAttributeFilter(attributesFilter)
+ .build();
+
+ viewMap.put(selector, view);
+ }
+
+ public static Map getAllViews() {
+ ImmutableMap.Builder views = ImmutableMap.builder();
+
+ defineView(
+ views,
+ OPERATION_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.builder()
+ .addAll(COMMON_ATTRIBUTES)
+ .add(STREAMING_KEY, STATUS_KEY)
+ .build());
+ defineView(
+ views,
+ ATTEMPT_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.builder()
+ .addAll(COMMON_ATTRIBUTES)
+ .add(STREAMING_KEY, STATUS_KEY)
+ .build());
+ defineView(
+ views,
+ SERVER_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+ defineView(
+ views,
+ FIRST_RESPONSE_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+ defineView(
+ views,
+ APPLICATION_BLOCKING_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build());
+ defineView(
+ views,
+ CLIENT_BLOCKING_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build());
+ defineView(
+ views,
+ RETRY_COUNT_NAME,
+ Aggregation.sum(),
+ InstrumentType.COUNTER,
+ "1",
+ ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+ defineView(
+ views,
+ CONNECTIVITY_ERROR_COUNT_NAME,
+ Aggregation.sum(),
+ InstrumentType.COUNTER,
+ "1",
+ ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+
+ defineView(
+ views,
+ PER_CONNECTION_ERROR_COUNT_NAME,
+ AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "1",
+ ImmutableSet.builder()
+ .add(BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, APP_PROFILE_KEY, CLIENT_NAME_KEY)
+ .build());
+
+ return views.build();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java
index 2d8262a93e..abd214d760 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java
@@ -16,13 +16,22 @@
package com.google.cloud.bigtable.data.v2.stub.metrics;
import static com.google.api.gax.tracing.ApiTracerFactory.OperationType;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
import com.google.api.gax.retrying.ServerStreamingAttemptException;
import com.google.api.gax.tracing.SpanName;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapper;
-import com.google.common.annotations.VisibleForTesting;
+import com.google.cloud.bigtable.Version;
import com.google.common.base.Stopwatch;
import com.google.common.math.IntMath;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.metrics.DoubleHistogram;
+import io.opentelemetry.api.metrics.LongCounter;
import java.util.concurrent.CancellationException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -37,8 +46,7 @@
*/
class BuiltinMetricsTracer extends BigtableTracer {
- private final StatsRecorderWrapper recorder;
-
+ private static final String NAME = "java-bigtable/" + Version.VERSION;
private final OperationType operationType;
private final SpanName spanName;
@@ -64,21 +72,56 @@ class BuiltinMetricsTracer extends BigtableTracer {
private boolean flowControlIsDisabled = false;
- private AtomicInteger requestLeft = new AtomicInteger(0);
+ private final AtomicInteger requestLeft = new AtomicInteger(0);
// Monitored resource labels
private String tableId = "unspecified";
private String zone = "global";
private String cluster = "unspecified";
- private AtomicLong totalClientBlockingTime = new AtomicLong(0);
+ private final AtomicLong totalClientBlockingTime = new AtomicLong(0);
+
+ private final Attributes baseAttributes;
+
+ private Long serverLatencies = null;
+
+ // OpenCensus (and server) histogram buckets use [start, end), however OpenTelemetry uses (start,
+ // end]. To work around this, we measure all the latencies in nanoseconds and convert them
+ // to milliseconds and use DoubleHistogram. This should minimize the chance of a data
+  // point falling on a bucket boundary, which causes off-by-one errors.
+ private final DoubleHistogram operationLatenciesHistogram;
+ private final DoubleHistogram attemptLatenciesHistogram;
+ private final DoubleHistogram serverLatenciesHistogram;
+ private final DoubleHistogram firstResponseLatenciesHistogram;
+ private final DoubleHistogram clientBlockingLatenciesHistogram;
+ private final DoubleHistogram applicationBlockingLatenciesHistogram;
+ private final LongCounter connectivityErrorCounter;
+ private final LongCounter retryCounter;
- @VisibleForTesting
BuiltinMetricsTracer(
- OperationType operationType, SpanName spanName, StatsRecorderWrapper recorder) {
+ OperationType operationType,
+ SpanName spanName,
+ Attributes attributes,
+ DoubleHistogram operationLatenciesHistogram,
+ DoubleHistogram attemptLatenciesHistogram,
+ DoubleHistogram serverLatenciesHistogram,
+ DoubleHistogram firstResponseLatenciesHistogram,
+ DoubleHistogram clientBlockingLatenciesHistogram,
+ DoubleHistogram applicationBlockingLatenciesHistogram,
+ LongCounter connectivityErrorCounter,
+ LongCounter retryCounter) {
this.operationType = operationType;
this.spanName = spanName;
- this.recorder = recorder;
+ this.baseAttributes = attributes;
+
+ this.operationLatenciesHistogram = operationLatenciesHistogram;
+ this.attemptLatenciesHistogram = attemptLatenciesHistogram;
+ this.serverLatenciesHistogram = serverLatenciesHistogram;
+ this.firstResponseLatenciesHistogram = firstResponseLatenciesHistogram;
+ this.clientBlockingLatenciesHistogram = clientBlockingLatenciesHistogram;
+ this.applicationBlockingLatenciesHistogram = applicationBlockingLatenciesHistogram;
+ this.connectivityErrorCounter = connectivityErrorCounter;
+ this.retryCounter = retryCounter;
}
@Override
@@ -203,13 +246,8 @@ public int getAttempt() {
@Override
public void recordGfeMetadata(@Nullable Long latency, @Nullable Throwable throwable) {
- // Record the metrics and put in the map after the attempt is done, so we can have cluster and
- // zone information
if (latency != null) {
- recorder.putGfeLatencies(latency);
- recorder.putGfeMissingHeaders(0);
- } else {
- recorder.putGfeMissingHeaders(1);
+ serverLatencies = latency;
}
}
@@ -220,13 +258,13 @@ public void setLocations(String zone, String cluster) {
}
@Override
- public void batchRequestThrottled(long throttledTimeMs) {
- totalClientBlockingTime.addAndGet(throttledTimeMs);
+ public void batchRequestThrottled(long throttledTimeNanos) {
+ totalClientBlockingTime.addAndGet(Duration.ofNanos(throttledTimeNanos).toMillis());
}
@Override
- public void grpcChannelQueuedLatencies(long queuedTimeMs) {
- totalClientBlockingTime.addAndGet(queuedTimeMs);
+ public void grpcChannelQueuedLatencies(long queuedTimeNanos) {
+ totalClientBlockingTime.addAndGet(queuedTimeNanos);
}
@Override
@@ -239,26 +277,43 @@ private void recordOperationCompletion(@Nullable Throwable status) {
return;
}
operationTimer.stop();
- long operationLatency = operationTimer.elapsed(TimeUnit.MILLISECONDS);
+
+ boolean isStreaming = operationType == OperationType.ServerStreaming;
+ String statusStr = Util.extractStatus(status);
+
+ // Publish metric data with all the attributes. The attributes get filtered in
+ // BuiltinMetricsConstants when we construct the views.
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, tableId)
+ .put(CLUSTER_ID_KEY, cluster)
+ .put(ZONE_ID_KEY, zone)
+ .put(METHOD_KEY, spanName.toString())
+ .put(CLIENT_NAME_KEY, NAME)
+ .put(STREAMING_KEY, isStreaming)
+ .put(STATUS_KEY, statusStr)
+ .build();
+
long operationLatencyNano = operationTimer.elapsed(TimeUnit.NANOSECONDS);
// Only record when retry count is greater than 0 so the retry
// graph will be less confusing
if (attemptCount > 1) {
- recorder.putRetryCount(attemptCount - 1);
+ retryCounter.add(attemptCount - 1, attributes);
}
+ operationLatenciesHistogram.record(convertToMs(operationLatencyNano), attributes);
+
// serverLatencyTimer should already be stopped in recordAttemptCompletion
- recorder.putOperationLatencies(operationLatency);
- recorder.putApplicationLatencies(
- Duration.ofNanos(operationLatencyNano - totalServerLatencyNano.get()).toMillis());
+ long applicationLatencyNano = operationLatencyNano - totalServerLatencyNano.get();
+ applicationBlockingLatenciesHistogram.record(convertToMs(applicationLatencyNano), attributes);
if (operationType == OperationType.ServerStreaming
&& spanName.getMethodName().equals("ReadRows")) {
- recorder.putFirstResponseLatencies(firstResponsePerOpTimer.elapsed(TimeUnit.MILLISECONDS));
+ firstResponseLatenciesHistogram.record(
+ convertToMs(firstResponsePerOpTimer.elapsed(TimeUnit.NANOSECONDS)), attributes);
}
-
- recorder.recordOperation(Util.extractStatus(status), tableId, zone, cluster);
}
private void recordAttemptCompletion(@Nullable Throwable status) {
@@ -273,8 +328,7 @@ private void recordAttemptCompletion(@Nullable Throwable status) {
}
}
- // Make sure to reset the blocking time after recording it for the next attempt
- recorder.putClientBlockingLatencies(totalClientBlockingTime.getAndSet(0));
+ boolean isStreaming = operationType == OperationType.ServerStreaming;
// Patch the status until it's fixed in gax. When an attempt failed,
// it'll throw a ServerStreamingAttemptException. Unwrap the exception
@@ -283,7 +337,35 @@ private void recordAttemptCompletion(@Nullable Throwable status) {
status = status.getCause();
}
- recorder.putAttemptLatencies(attemptTimer.elapsed(TimeUnit.MILLISECONDS));
- recorder.recordAttempt(Util.extractStatus(status), tableId, zone, cluster);
+ String statusStr = Util.extractStatus(status);
+
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, tableId)
+ .put(CLUSTER_ID_KEY, cluster)
+ .put(ZONE_ID_KEY, zone)
+ .put(METHOD_KEY, spanName.toString())
+ .put(CLIENT_NAME_KEY, NAME)
+ .put(STREAMING_KEY, isStreaming)
+ .put(STATUS_KEY, statusStr)
+ .build();
+
+ clientBlockingLatenciesHistogram.record(convertToMs(totalClientBlockingTime.get()), attributes);
+
+ attemptLatenciesHistogram.record(
+ convertToMs(attemptTimer.elapsed(TimeUnit.NANOSECONDS)), attributes);
+
+ if (serverLatencies != null) {
+ serverLatenciesHistogram.record(serverLatencies, attributes);
+ connectivityErrorCounter.add(0, attributes);
+ } else {
+ connectivityErrorCounter.add(1, attributes);
+ }
+ }
+
+ private static double convertToMs(long nanoSeconds) {
+ double toMs = 1e-6;
+ return nanoSeconds * toMs;
}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java
index 794997071d..f0ac656978 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java
@@ -15,29 +15,112 @@
*/
package com.google.cloud.bigtable.data.v2.stub.metrics;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME;
+
import com.google.api.core.InternalApi;
import com.google.api.gax.tracing.ApiTracer;
import com.google.api.gax.tracing.ApiTracerFactory;
import com.google.api.gax.tracing.BaseApiTracerFactory;
import com.google.api.gax.tracing.SpanName;
-import com.google.cloud.bigtable.stats.StatsWrapper;
-import com.google.common.collect.ImmutableMap;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.metrics.DoubleHistogram;
+import io.opentelemetry.api.metrics.LongCounter;
+import io.opentelemetry.api.metrics.Meter;
+import java.io.IOException;
/**
- * {@link ApiTracerFactory} that will generate OpenCensus metrics by using the {@link ApiTracer}
+ * {@link ApiTracerFactory} that will generate OpenTelemetry metrics by using the {@link ApiTracer}
* api.
*/
@InternalApi("For internal use only")
public class BuiltinMetricsTracerFactory extends BaseApiTracerFactory {
- private final ImmutableMap statsAttributes;
+ private final Attributes attributes;
+
+ private static final String MILLISECOND = "ms";
+ private static final String COUNT = "1";
- public static BuiltinMetricsTracerFactory create(ImmutableMap statsAttributes) {
- return new BuiltinMetricsTracerFactory(statsAttributes);
+ private final DoubleHistogram operationLatenciesHistogram;
+ private final DoubleHistogram attemptLatenciesHistogram;
+ private final DoubleHistogram serverLatenciesHistogram;
+ private final DoubleHistogram firstResponseLatenciesHistogram;
+ private final DoubleHistogram clientBlockingLatenciesHistogram;
+ private final DoubleHistogram applicationBlockingLatenciesHistogram;
+ private final LongCounter connectivityErrorCounter;
+ private final LongCounter retryCounter;
+
+ public static BuiltinMetricsTracerFactory create(
+ OpenTelemetry openTelemetry, Attributes attributes) throws IOException {
+ return new BuiltinMetricsTracerFactory(openTelemetry, attributes);
}
- private BuiltinMetricsTracerFactory(ImmutableMap statsAttributes) {
- this.statsAttributes = statsAttributes;
+ BuiltinMetricsTracerFactory(OpenTelemetry openTelemetry, Attributes attributes) {
+ this.attributes = attributes;
+ Meter meter = openTelemetry.getMeter(METER_NAME);
+
+ operationLatenciesHistogram =
+ meter
+ .histogramBuilder(OPERATION_LATENCIES_NAME)
+ .setDescription(
+ "Total time until final operation success or failure, including retries and backoff.")
+ .setUnit(MILLISECOND)
+ .build();
+ attemptLatenciesHistogram =
+ meter
+ .histogramBuilder(ATTEMPT_LATENCIES_NAME)
+ .setDescription("Client observed latency per RPC attempt.")
+ .setUnit(MILLISECOND)
+ .build();
+ serverLatenciesHistogram =
+ meter
+ .histogramBuilder(SERVER_LATENCIES_NAME)
+ .setDescription(
+ "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.")
+ .setUnit(MILLISECOND)
+ .build();
+ firstResponseLatenciesHistogram =
+ meter
+ .histogramBuilder(FIRST_RESPONSE_LATENCIES_NAME)
+ .setDescription(
+ "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.")
+ .setUnit(MILLISECOND)
+ .build();
+ clientBlockingLatenciesHistogram =
+ meter
+ .histogramBuilder(CLIENT_BLOCKING_LATENCIES_NAME)
+ .setDescription(
+ "The artificial latency introduced by the client to limit the number of outstanding requests. The publishing of the measurement will be delayed until the attempt trailers have been received.")
+ .setUnit(MILLISECOND)
+ .build();
+ applicationBlockingLatenciesHistogram =
+ meter
+ .histogramBuilder(APPLICATION_BLOCKING_LATENCIES_NAME)
+ .setDescription(
+ "The latency of the client application consuming available response data.")
+ .setUnit(MILLISECOND)
+ .build();
+ connectivityErrorCounter =
+ meter
+ .counterBuilder(CONNECTIVITY_ERROR_COUNT_NAME)
+ .setDescription(
+ "Number of requests that failed to reach the Google datacenter. (Requests without google response headers")
+ .setUnit(COUNT)
+ .build();
+ retryCounter =
+ meter
+ .counterBuilder(RETRY_COUNT_NAME)
+ .setDescription("The number of additional RPCs sent after the initial attempt.")
+ .setUnit(COUNT)
+ .build();
}
@Override
@@ -45,6 +128,14 @@ public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType op
return new BuiltinMetricsTracer(
operationType,
spanName,
- StatsWrapper.createRecorder(operationType, spanName, statsAttributes));
+ attributes,
+ operationLatenciesHistogram,
+ attemptLatenciesHistogram,
+ serverLatenciesHistogram,
+ firstResponseLatenciesHistogram,
+ clientBlockingLatenciesHistogram,
+ applicationBlockingLatenciesHistogram,
+ connectivityErrorCounter,
+ retryCounter);
}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java
new file mode 100644
index 0000000000..445160a146
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.auth.Credentials;
+import com.google.auth.oauth2.GoogleCredentials;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.View;
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
+import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;
+import java.io.IOException;
+import java.util.Map;
+import javax.annotation.Nullable;
+
+/**
+ * A util class to register built-in metrics on a custom OpenTelemetry instance. This is for
+ * advanced usage, and is only necessary when wanting to write built-in metrics to cloud monitoring
+ * and custom sinks. Please refer to {@link CustomOpenTelemetryMetricsProvider} for example usage.
+ */
+public class BuiltinMetricsView {
+
+ private BuiltinMetricsView() {}
+
+ /**
+ * Register built-in metrics on the {@link SdkMeterProviderBuilder} with application default
+ * credentials.
+ */
+ public static void registerBuiltinMetrics(String projectId, SdkMeterProviderBuilder builder)
+ throws IOException {
+ BuiltinMetricsView.registerBuiltinMetrics(
+ projectId, GoogleCredentials.getApplicationDefault(), builder);
+ }
+
+ /** Register built-in metrics on the {@link SdkMeterProviderBuilder} with credentials. */
+ public static void registerBuiltinMetrics(
+ String projectId, @Nullable Credentials credentials, SdkMeterProviderBuilder builder)
+ throws IOException {
+ MetricExporter metricExporter = BigtableCloudMonitoringExporter.create(projectId, credentials);
+ for (Map.Entry entry :
+ BuiltinMetricsConstants.getAllViews().entrySet()) {
+ builder.registerView(entry.getKey(), entry.getValue());
+ }
+ builder.registerMetricReader(PeriodicMetricReader.create(metricExporter));
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java
new file mode 100644
index 0000000000..8c1c5c1c90
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.common.base.MoreObjects;
+import io.opentelemetry.api.OpenTelemetry;
+
+/**
+ * Set a custom OpenTelemetry instance.
+ *
+ * To register client side metrics on the custom OpenTelemetry:
+ *
+ *
{@code
+ * SdkMeterProviderBuilder sdkMeterProvider = SdkMeterProvider.builder();
+ *
+ * // register Builtin metrics on your meter provider with default credentials
+ * BuiltinMetricsView.registerBuiltinMetrics("project-id", sdkMeterProvider);
+ *
+ * // register other metrics reader and views
+ * sdkMeterProvider.registerMetricReader(..);
+ * sdkMeterProvider.registerView(..);
+ *
+ * // create the OTEL instance
+ * OpenTelemetry openTelemetry = OpenTelemetrySdk
+ * .builder()
+ * .setMeterProvider(sdkMeterProvider.build())
+ * .build();
+ *
+ * // Override MetricsProvider in BigtableDataSettings
+ * BigtableDataSettings settings = BigtableDataSettings.newBuilder()
+ * .setProjectId("my-project")
+ * .setInstanceId("my-instance-id")
+ * .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)
+ * .build();
+ * }
+ */
+public final class CustomOpenTelemetryMetricsProvider implements MetricsProvider {
+
+ private final OpenTelemetry otel;
+
+ public static CustomOpenTelemetryMetricsProvider create(OpenTelemetry otel) {
+ return new CustomOpenTelemetryMetricsProvider(otel);
+ }
+
+ private CustomOpenTelemetryMetricsProvider(OpenTelemetry otel) {
+ this.otel = otel;
+ }
+
+ public OpenTelemetry getOpenTelemetry() {
+ return otel;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("openTelemetry", otel).toString();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java
new file mode 100644
index 0000000000..b8aad8c931
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.api.core.InternalApi;
+import com.google.auth.Credentials;
+import com.google.common.base.MoreObjects;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import java.io.IOException;
+import javax.annotation.Nullable;
+
+/**
+ * Set {@link
+ * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)},
+ * to {@link this#INSTANCE} to enable collecting and exporting client-side metrics
+ * https://cloud.google.com/bigtable/docs/client-side-metrics. This is the default setting in {@link
+ * com.google.cloud.bigtable.data.v2.BigtableDataSettings}.
+ */
+public final class DefaultMetricsProvider implements MetricsProvider {
+
+ public static DefaultMetricsProvider INSTANCE = new DefaultMetricsProvider();
+
+ private OpenTelemetry openTelemetry;
+ private String projectId;
+
+ private DefaultMetricsProvider() {}
+
+ @InternalApi
+ public OpenTelemetry getOpenTelemetry(String projectId, @Nullable Credentials credentials)
+ throws IOException {
+ this.projectId = projectId;
+ if (openTelemetry == null) {
+ SdkMeterProviderBuilder meterProvider = SdkMeterProvider.builder();
+ BuiltinMetricsView.registerBuiltinMetrics(projectId, credentials, meterProvider);
+ openTelemetry = OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+ }
+ return openTelemetry;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this)
+ .add("projectId", projectId)
+ .add("openTelemetry", openTelemetry)
+ .toString();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java
index cab3b0bbd0..a891df9509 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java
@@ -15,12 +15,15 @@
*/
package com.google.cloud.bigtable.data.v2.stub.metrics;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME;
+
import com.google.api.core.InternalApi;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection;
-import com.google.cloud.bigtable.stats.StatsWrapper;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableMap;
import io.grpc.ClientInterceptor;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.metrics.LongHistogram;
+import io.opentelemetry.api.metrics.Meter;
import java.util.Collections;
import java.util.Set;
import java.util.WeakHashMap;
@@ -30,24 +33,30 @@
/* Background task that goes through all connections and updates the errors_per_connection metric. */
@InternalApi("For internal use only")
public class ErrorCountPerConnectionMetricTracker implements Runnable {
+
private static final Integer PER_CONNECTION_ERROR_COUNT_PERIOD_SECONDS = 60;
+
+ private final LongHistogram perConnectionErrorCountHistogram;
+ private final Attributes attributes;
+
private final Set connectionErrorCountInterceptors;
private final Object interceptorsLock = new Object();
- // This is not final so that it can be updated and mocked during testing.
- private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection;
- @VisibleForTesting
- void setStatsRecorderWrapperForConnection(
- StatsRecorderWrapperForConnection statsRecorderWrapperForConnection) {
- this.statsRecorderWrapperForConnection = statsRecorderWrapperForConnection;
- }
-
- public ErrorCountPerConnectionMetricTracker(ImmutableMap builtinAttributes) {
+ public ErrorCountPerConnectionMetricTracker(OpenTelemetry openTelemetry, Attributes attributes) {
connectionErrorCountInterceptors =
Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap<>()));
- this.statsRecorderWrapperForConnection =
- StatsWrapper.createRecorderForConnection(builtinAttributes);
+ Meter meter = openTelemetry.getMeter(METER_NAME);
+
+ perConnectionErrorCountHistogram =
+ meter
+ .histogramBuilder(PER_CONNECTION_ERROR_COUNT_NAME)
+ .ofLongs()
+ .setDescription("Distribution of counts of channels per 'error count per minute'.")
+ .setUnit("1")
+ .build();
+
+ this.attributes = attributes;
}
public void startConnectionErrorCountTracker(ScheduledExecutorService scheduler) {
@@ -75,7 +84,7 @@ public void run() {
if (errors > 0 || successes > 0) {
// TODO: add a metric to also keep track of the number of successful requests per each
// connection.
- statsRecorderWrapperForConnection.putAndRecordPerConnectionErrorCount(errors);
+ perConnectionErrorCountHistogram.record(errors, attributes);
}
}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java
new file mode 100644
index 0000000000..251bb41619
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.api.core.InternalExtensionOnly;
+
+/**
+ * Provide client side metrics https://cloud.google.com/bigtable/docs/client-side-metrics
+ * implementations.
+ */
+@InternalExtensionOnly
+public interface MetricsProvider {}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java
new file mode 100644
index 0000000000..9a00ddb135
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Set {@link
+ * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}
+ * to {@link #INSTANCE} to disable collecting and exporting client side metrics
+ * https://cloud.google.com/bigtable/docs/client-side-metrics.
+ */
+public final class NoopMetricsProvider implements MetricsProvider {
+
+ public static NoopMetricsProvider INSTANCE = new NoopMetricsProvider();
+
+ private NoopMetricsProvider() {}
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).toString();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java
index b7140f0156..ce73d75dc1 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java
@@ -21,6 +21,7 @@
import com.google.api.gax.rpc.ApiCallContext;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.api.gax.tracing.ApiTracer;
+import org.threeten.bp.Duration;
/**
* This callable will extract total throttled time from {@link ApiCallContext} and add it to {@link
@@ -42,7 +43,8 @@ public ApiFuture futureCall(RequestT request, ApiCallContext context)
// this should always be true
if (tracer instanceof BigtableTracer) {
((BigtableTracer) tracer)
- .batchRequestThrottled(context.getOption(Batcher.THROTTLED_TIME_KEY));
+ .batchRequestThrottled(
+ Duration.ofMillis(context.getOption(Batcher.THROTTLED_TIME_KEY)).toNanos());
}
}
return innerCallable.futureCall(request, context);
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/it/BigtableInstanceAdminClientIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/it/BigtableInstanceAdminClientIT.java
index d8b9410cae..76413165bd 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/it/BigtableInstanceAdminClientIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/it/BigtableInstanceAdminClientIT.java
@@ -242,6 +242,43 @@ public void appProfileTestPriority() {
}
}
+ @Test
+ public void appProfileTestDataBoost() {
+ String newInstanceId = prefixGenerator.newPrefix();
+ String newClusterId = newInstanceId + "-c1";
+
+ client.createInstance(
+ CreateInstanceRequest.of(newInstanceId)
+ .addCluster(newClusterId, testEnvRule.env().getPrimaryZone(), 1, StorageType.SSD)
+ .setDisplayName("Priority-Instance-Test")
+ .addLabel("state", "readytodelete")
+ .setType(Type.PRODUCTION));
+
+ try {
+ assertThat(client.exists(newInstanceId)).isTrue();
+
+ String testAppProfile = prefixGenerator.newPrefix();
+
+ CreateAppProfileRequest request =
+ CreateAppProfileRequest.of(newInstanceId, testAppProfile)
+ .setRoutingPolicy(AppProfile.SingleClusterRoutingPolicy.of(newClusterId))
+ .setIsolationPolicy(
+ AppProfile.DataBoostIsolationReadOnlyPolicy.of(
+ AppProfile.ComputeBillingOwner.HOST_PAYS))
+ .setDescription("databoost app profile");
+
+ AppProfile newlyCreateAppProfile = client.createAppProfile(request);
+ AppProfile.ComputeBillingOwner computeBillingOwner =
+ ((AppProfile.DataBoostIsolationReadOnlyPolicy) newlyCreateAppProfile.getIsolationPolicy())
+ .getComputeBillingOwner();
+ assertThat(computeBillingOwner).isEqualTo(AppProfile.ComputeBillingOwner.HOST_PAYS);
+ } finally {
+ if (client.exists(newInstanceId)) {
+ client.deleteInstance(newInstanceId);
+ }
+ }
+ }
+
@Test
public void iamUpdateTest() {
Policy policy = client.getIamPolicy(instanceId);
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/AppProfileTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/AppProfileTest.java
index 35711cefdb..8215e5f8fc 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/AppProfileTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/AppProfileTest.java
@@ -234,4 +234,61 @@ public void testHashCode() {
assertThat(updateAppProfileRequest.hashCode())
.isNotEqualTo(updateAppProfileRequest3.hashCode());
}
+
+ @Test
+ public void testFromProtoWithDataBoostIsolation() {
+ AppProfile producer =
+ AppProfile.fromProto(
+ com.google.bigtable.admin.v2.AppProfile.newBuilder()
+ .setName(AppProfileName.of("my-project", "my-instance", "my-profile").toString())
+ .setDescription("my description")
+ .setSingleClusterRouting(
+ SingleClusterRouting.newBuilder()
+ .setClusterId("my-cluster")
+ .setAllowTransactionalWrites(true)
+ .build())
+ .setDataBoostIsolationReadOnly(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.newBuilder()
+ .setComputeBillingOwner(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .ComputeBillingOwner.HOST_PAYS))
+ .setEtag("my-etag")
+ .build());
+
+ assertThat(producer.getInstanceId()).isEqualTo("my-instance");
+ assertThat(producer.getId()).isEqualTo("my-profile");
+ assertThat(producer.getDescription()).isEqualTo("my description");
+ assertThat(producer.getPolicy()).isEqualTo(SingleClusterRoutingPolicy.of("my-cluster", true));
+ assertThat(producer.getIsolationPolicy())
+ .isEqualTo(
+ AppProfile.DataBoostIsolationReadOnlyPolicy.of(
+ AppProfile.ComputeBillingOwner.HOST_PAYS));
+
+ AppProfile consumer =
+ AppProfile.fromProto(
+ com.google.bigtable.admin.v2.AppProfile.newBuilder()
+ .setName(AppProfileName.of("my-project", "my-instance", "my-profile").toString())
+ .setDescription("my description")
+ .setSingleClusterRouting(
+ SingleClusterRouting.newBuilder()
+ .setClusterId("my-cluster")
+ .setAllowTransactionalWrites(true)
+ .build())
+ .setDataBoostIsolationReadOnly(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.newBuilder()
+ .setComputeBillingOwner(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .ComputeBillingOwner.COMPUTE_BILLING_OWNER_UNSPECIFIED))
+ .setEtag("my-etag")
+ .build());
+
+ assertThat(consumer.getInstanceId()).isEqualTo("my-instance");
+ assertThat(consumer.getId()).isEqualTo("my-profile");
+ assertThat(consumer.getDescription()).isEqualTo("my description");
+ assertThat(consumer.getPolicy()).isEqualTo(SingleClusterRoutingPolicy.of("my-cluster", true));
+ assertThat(consumer.getIsolationPolicy())
+ .isEqualTo(
+ AppProfile.DataBoostIsolationReadOnlyPolicy.of(
+ AppProfile.ComputeBillingOwner.UNSPECIFIED));
+ }
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequestTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequestTest.java
index 4e5812f774..088dc2bcfe 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequestTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateAppProfileRequestTest.java
@@ -17,6 +17,7 @@
import static com.google.common.truth.Truth.assertThat;
+import com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly;
import com.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny;
import com.google.bigtable.admin.v2.AppProfile.SingleClusterRouting;
import com.google.bigtable.admin.v2.AppProfile.StandardIsolation;
@@ -84,4 +85,20 @@ public void testStandardIsolation() {
assertThat(wrapper.toProto("my-project").getAppProfile().getStandardIsolation())
.isEqualTo(StandardIsolation.getDefaultInstance());
}
+
+ @Test
+ public void testDataBoostIsolationReadOnly() {
+ CreateAppProfileRequest wrapper =
+ CreateAppProfileRequest.of("my-instance", "my-profile")
+ .setRoutingPolicy(MultiClusterRoutingPolicy.of())
+ .setIsolationPolicy(
+ AppProfile.DataBoostIsolationReadOnlyPolicy.of(
+ AppProfile.ComputeBillingOwner.HOST_PAYS));
+
+ assertThat(wrapper.toProto("my-project").getAppProfile().getDataBoostIsolationReadOnly())
+ .isEqualTo(
+ DataBoostIsolationReadOnly.newBuilder()
+ .setComputeBillingOwner(DataBoostIsolationReadOnly.ComputeBillingOwner.HOST_PAYS)
+ .build());
+ }
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequestTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequestTest.java
index 13e98f14c1..04cf3f0813 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequestTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/UpdateAppProfileRequestTest.java
@@ -17,6 +17,7 @@
import static com.google.common.truth.Truth.assertThat;
+import com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly;
import com.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny;
import com.google.bigtable.admin.v2.AppProfile.SingleClusterRouting;
import com.google.bigtable.admin.v2.AppProfile.StandardIsolation;
@@ -111,4 +112,38 @@ public void testUpdateExistingStandardIsolation() {
.setUpdateMask(FieldMask.newBuilder().addPaths("standard_isolation"))
.build());
}
+
+ @Test
+ public void testUpdateExistingDataBoostIsolationReadOnly() {
+ com.google.bigtable.admin.v2.AppProfile existingProto =
+ com.google.bigtable.admin.v2.AppProfile.newBuilder()
+ .setName("projects/my-project/instances/my-instance/appProfiles/my-profile")
+ .setEtag("my-etag")
+ .setDescription("description")
+ .setMultiClusterRoutingUseAny(MultiClusterRoutingUseAny.getDefaultInstance())
+ .setStandardIsolation(StandardIsolation.getDefaultInstance())
+ .build();
+
+ AppProfile existingWrapper = AppProfile.fromProto(existingProto);
+
+ UpdateAppProfileRequest updateWrapper =
+ UpdateAppProfileRequest.of(existingWrapper)
+ .setIsolationPolicy(
+ AppProfile.DataBoostIsolationReadOnlyPolicy.of(
+ AppProfile.ComputeBillingOwner.HOST_PAYS));
+
+ assertThat(updateWrapper.toProto("my-project"))
+ .isEqualTo(
+ com.google.bigtable.admin.v2.UpdateAppProfileRequest.newBuilder()
+ .setAppProfile(
+ existingProto
+ .toBuilder()
+ .setDataBoostIsolationReadOnly(
+ DataBoostIsolationReadOnly.newBuilder()
+ .setComputeBillingOwner(
+ DataBoostIsolationReadOnly.ComputeBillingOwner.HOST_PAYS)
+ .build()))
+ .setUpdateMask(FieldMask.newBuilder().addPaths("data_boost_isolation_read_only"))
+ .build());
+ }
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java
index a35112b380..fea66e82bf 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java
@@ -36,6 +36,7 @@
import com.google.bigtable.v2.ReadRowsResponse;
import com.google.cloud.bigtable.data.v2.internal.NameUtil;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider;
import com.google.common.base.Preconditions;
import com.google.common.io.BaseEncoding;
import io.grpc.Attributes;
@@ -169,10 +170,13 @@ public void tearDown() {
@Test
public void testNewClientsShareTransportChannel() throws Exception {
-
// Create 3 lightweight clients
-
- try (BigtableDataClientFactory factory = BigtableDataClientFactory.create(defaultSettings);
+ try (BigtableDataClientFactory factory =
+ BigtableDataClientFactory.create(
+ defaultSettings
+ .toBuilder()
+ .setMetricsProvider(NoopMetricsProvider.INSTANCE)
+ .build());
BigtableDataClient ignored1 = factory.createForInstance("project1", "instance1");
BigtableDataClient ignored2 = factory.createForInstance("project2", "instance2");
BigtableDataClient ignored3 = factory.createForInstance("project3", "instance3")) {
@@ -316,7 +320,7 @@ public void testFeatureFlags() throws Exception {
@Test
public void testBulkMutationFlowControllerConfigured() throws Exception {
BigtableDataSettings settings =
- BigtableDataSettings.newBuilder()
+ BigtableDataSettings.newBuilderForEmulator(server.getPort())
.setProjectId("my-project")
.setInstanceId("my-instance")
.setCredentialsProvider(credentialsProvider)
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
index 4e75fb8631..56181a20ab 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
@@ -15,34 +15,64 @@
*/
package com.google.cloud.bigtable.data.v2.it;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getStartTimeSeconds;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes;
+import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
import static com.google.common.truth.TruthJUnit.assume;
import com.google.api.client.util.Lists;
+import com.google.cloud.bigtable.admin.v2.BigtableInstanceAdminClient;
import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient;
+import com.google.cloud.bigtable.admin.v2.models.AppProfile;
+import com.google.cloud.bigtable.admin.v2.models.CreateAppProfileRequest;
import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest;
import com.google.cloud.bigtable.admin.v2.models.Table;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.models.Query;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
import com.google.cloud.bigtable.test_helpers.env.PrefixGenerator;
import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
import com.google.cloud.monitoring.v3.MetricServiceClient;
import com.google.common.base.Stopwatch;
+import com.google.common.collect.BoundType;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Range;
import com.google.monitoring.v3.ListTimeSeriesRequest;
import com.google.monitoring.v3.ListTimeSeriesResponse;
+import com.google.monitoring.v3.Point;
import com.google.monitoring.v3.ProjectName;
import com.google.monitoring.v3.TimeInterval;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.protobuf.Timestamp;
import com.google.protobuf.util.Timestamps;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.common.AttributesBuilder;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import java.util.stream.Collectors;
+import org.junit.After;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
@@ -50,6 +80,7 @@
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.threeten.bp.Duration;
+import org.threeten.bp.Instant;
@RunWith(JUnit4.class)
public class BuiltinMetricsIT {
@@ -58,71 +89,131 @@ public class BuiltinMetricsIT {
private static final Logger logger = Logger.getLogger(BuiltinMetricsIT.class.getName());
@Rule public Timeout globalTimeout = Timeout.seconds(900);
- private static Table table;
- private static BigtableTableAdminClient tableAdminClient;
- private static MetricServiceClient metricClient;
+
+ private Table tableCustomOtel;
+ private Table tableDefault;
+ private BigtableDataClient clientCustomOtel;
+ private BigtableDataClient clientDefault;
+ private BigtableTableAdminClient tableAdminClient;
+ private BigtableInstanceAdminClient instanceAdminClient;
+ private MetricServiceClient metricClient;
+
+ private InMemoryMetricReader metricReader;
+ private String appProfileCustomOtel;
+ private String appProfileDefault;
public static String[] VIEWS = {
"operation_latencies",
"attempt_latencies",
"connectivity_error_count",
- "application_blocking_latencies"
+ "application_blocking_latencies",
};
- @BeforeClass
- public static void setUpClass() throws IOException {
+ @Before
+ public void setup() throws IOException {
+ // This test tests 2 things. End-to-end test using the default OTEL instance created by the
+ // client, and also end-to-end test using a custom OTEL instance set by the customer. In
+ // both tests, a BigtableCloudMonitoringExporter is created to export data to Cloud Monitoring.
assume()
.withMessage("Builtin metrics integration test is not supported by emulator")
.that(testEnvRule.env())
.isNotInstanceOf(EmulatorEnv.class);
- // Enable built in metrics
- BigtableDataSettings.enableBuiltinMetrics();
-
// Create a cloud monitoring client
metricClient = MetricServiceClient.create();
tableAdminClient = testEnvRule.env().getTableAdminClient();
+ instanceAdminClient = testEnvRule.env().getInstanceAdminClient();
+ appProfileCustomOtel = PrefixGenerator.newPrefix("test1");
+ appProfileDefault = PrefixGenerator.newPrefix("test2");
+ instanceAdminClient.createAppProfile(
+ CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileCustomOtel)
+ .setRoutingPolicy(
+ AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId()))
+ .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW)));
+ instanceAdminClient.createAppProfile(
+ CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileDefault)
+ .setRoutingPolicy(
+ AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId()))
+ .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW)));
+
+ // When using the custom OTEL instance, we can also register a InMemoryMetricReader on the
+ // SdkMeterProvider to verify the data exported on Cloud Monitoring with the in memory metric
+ // data collected in InMemoryMetricReader.
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+ BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider);
+ OpenTelemetry openTelemetry =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
+ BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder();
+
+ clientCustomOtel =
+ BigtableDataClient.create(
+ settings
+ .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry))
+ .setAppProfileId(appProfileCustomOtel)
+ .build());
+ clientDefault = BigtableDataClient.create(settings.setAppProfileId(appProfileDefault).build());
}
- @AfterClass
- public static void tearDown() {
+ @After
+ public void tearDown() {
if (metricClient != null) {
metricClient.close();
}
- if (table != null) {
- tableAdminClient.deleteTable(table.getId());
+ if (tableCustomOtel != null) {
+ tableAdminClient.deleteTable(tableCustomOtel.getId());
+ }
+ if (tableDefault != null) {
+ tableAdminClient.deleteTable(tableDefault.getId());
+ }
+ if (instanceAdminClient != null) {
+ instanceAdminClient.deleteAppProfile(
+ testEnvRule.env().getInstanceId(), appProfileCustomOtel, true);
+ instanceAdminClient.deleteAppProfile(
+ testEnvRule.env().getInstanceId(), appProfileDefault, true);
+ }
+ if (clientCustomOtel != null) {
+ clientCustomOtel.close();
+ }
+ if (clientDefault != null) {
+ clientDefault.close();
}
}
@Test
- public void testBuiltinMetrics() throws Exception {
- logger.info("Started testing builtin metrics");
- table =
+ public void testBuiltinMetricsWithDefaultOTEL() throws Exception {
+ logger.info("Started testing builtin metrics with default OTEL");
+ tableDefault =
tableAdminClient.createTable(
- CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test"))
+ CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test1"))
.addFamily("cf"));
- logger.info("Create table: " + table.getId());
- // Send a MutateRow and ReadRows request
- testEnvRule
- .env()
- .getDataClient()
- .mutateRow(RowMutation.create(table.getId(), "a-new-key").setCell("cf", "q", "abc"));
+ logger.info("Create default table: " + tableDefault.getId());
+
+ Instant start = Instant.now().minus(Duration.ofSeconds(10));
+
+ // Send a MutateRow and ReadRows request and measure the latencies for these requests.
+ clientDefault.mutateRow(
+ RowMutation.create(tableDefault.getId(), "a-new-key").setCell("cf", "q", "abc"));
ArrayList rows =
- Lists.newArrayList(
- testEnvRule.env().getDataClient().readRows(Query.create(table.getId()).limit(10)));
+ Lists.newArrayList(clientDefault.readRows(Query.create(tableDefault.getId()).limit(10)));
- Stopwatch stopwatch = Stopwatch.createStarted();
+    // This stopwatch is used to limit fetching of metric data in verifyMetrics
+ Stopwatch metricsPollingStopwatch = Stopwatch.createStarted();
ProjectName name = ProjectName.of(testEnvRule.env().getProjectId());
- // Restrict time to last 10 minutes and 5 minutes after the request
- long startMillis = System.currentTimeMillis() - Duration.ofMinutes(10).toMillis();
- long endMillis = startMillis + Duration.ofMinutes(15).toMillis();
+    // Interval is set in the monarch request when querying metric timestamps.
+    // Restrict it to before we send the request and 3 minutes after we send the request. If
+    // it turns out to be still flaky we can increase the filter range.
+ Instant end = Instant.now().plus(Duration.ofMinutes(3));
TimeInterval interval =
TimeInterval.newBuilder()
- .setStartTime(Timestamps.fromMillis(startMillis))
- .setEndTime(Timestamps.fromMillis(endMillis))
+ .setStartTime(Timestamps.fromMillis(start.toEpochMilli()))
+ .setEndTime(Timestamps.fromMillis(end.toEpochMilli()))
.build();
for (String view : VIEWS) {
@@ -132,42 +223,123 @@ public void testBuiltinMetrics() throws Exception {
String.format(
"metric.type=\"bigtable.googleapis.com/client/%s\" "
+ "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\""
- + " AND resource.labels.table=\"%s\"",
- view, testEnvRule.env().getInstanceId(), table.getId());
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault);
ListTimeSeriesRequest.Builder requestBuilder =
ListTimeSeriesRequest.newBuilder()
.setName(name.toString())
.setFilter(metricFilter)
.setInterval(interval)
.setView(ListTimeSeriesRequest.TimeSeriesView.FULL);
-
- verifyMetricsArePublished(requestBuilder.build(), stopwatch, view);
+ verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
// Verify that metrics are published for ReadRows request
metricFilter =
String.format(
"metric.type=\"bigtable.googleapis.com/client/%s\" "
+ "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\""
- + " AND resource.labels.table=\"%s\"",
- view, testEnvRule.env().getInstanceId(), table.getId());
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault);
+ requestBuilder.setFilter(metricFilter);
+
+ verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
+ }
+ }
+
+ @Test
+ public void testBuiltinMetricsWithCustomOTEL() throws Exception {
+ logger.info("Started testing builtin metrics with custom OTEL");
+ tableCustomOtel =
+ tableAdminClient.createTable(
+ CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test2"))
+ .addFamily("cf"));
+ logger.info("Create custom table: " + tableCustomOtel.getId());
+
+ Instant start = Instant.now().minus(Duration.ofSeconds(10));
+ // Send a MutateRow and ReadRows request and measure the latencies for these requests.
+ clientCustomOtel.mutateRow(
+ RowMutation.create(tableCustomOtel.getId(), "a-new-key").setCell("cf", "q", "abc"));
+ ArrayList rows =
+ Lists.newArrayList(
+ clientCustomOtel.readRows(Query.create(tableCustomOtel.getId()).limit(10)));
+
+    // This stopwatch is used to limit fetching of metric data in verifyMetrics
+ Stopwatch metricsPollingStopwatch = Stopwatch.createStarted();
+
+ ProjectName name = ProjectName.of(testEnvRule.env().getProjectId());
+
+ Collection fromMetricReader = metricReader.collectAllMetrics();
+
+    // Interval is set in the monarch request when querying metric timestamps.
+    // Restrict it to before we send the request and 3 minutes after we send the request. If
+    // it turns out to be still flaky we can increase the filter range.
+ Instant end = start.plus(Duration.ofMinutes(3));
+ TimeInterval interval =
+ TimeInterval.newBuilder()
+ .setStartTime(Timestamps.fromMillis(start.toEpochMilli()))
+ .setEndTime(Timestamps.fromMillis(end.toEpochMilli()))
+ .build();
+
+ for (String view : VIEWS) {
+ String otelMetricName = view;
+ if (view.equals("application_blocking_latencies")) {
+ otelMetricName = "application_latencies";
+ }
+ MetricData dataFromReader = getMetricData(fromMetricReader, otelMetricName);
+
+ // Filter on instance and method name
+ // Verify that metrics are correct for MutateRows request
+ String metricFilter =
+ String.format(
+ "metric.type=\"bigtable.googleapis.com/client/%s\" "
+ + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\""
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view,
+ testEnvRule.env().getInstanceId(),
+ tableCustomOtel.getId(),
+ appProfileCustomOtel);
+ ListTimeSeriesRequest.Builder requestBuilder =
+ ListTimeSeriesRequest.newBuilder()
+ .setName(name.toString())
+ .setFilter(metricFilter)
+ .setInterval(interval)
+ .setView(ListTimeSeriesRequest.TimeSeriesView.FULL);
+
+ ListTimeSeriesResponse response =
+ verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
+ verifyMetricsWithMetricsReader(response, dataFromReader);
+
+ // Verify that metrics are correct for ReadRows request
+ metricFilter =
+ String.format(
+ "metric.type=\"bigtable.googleapis.com/client/%s\" "
+ + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\""
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view,
+ testEnvRule.env().getInstanceId(),
+ tableCustomOtel.getId(),
+ appProfileCustomOtel);
requestBuilder.setFilter(metricFilter);
- verifyMetricsArePublished(requestBuilder.build(), stopwatch, view);
+ response = verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
+ verifyMetricsWithMetricsReader(response, dataFromReader);
}
}
- private void verifyMetricsArePublished(
- ListTimeSeriesRequest request, Stopwatch stopwatch, String view) throws Exception {
+ private ListTimeSeriesResponse verifyMetricsArePublished(
+ ListTimeSeriesRequest request, Stopwatch metricsPollingStopwatch, String view)
+ throws Exception {
ListTimeSeriesResponse response = metricClient.listTimeSeriesCallable().call(request);
- logger.log(
- Level.INFO,
- "Checking for view "
- + view
- + ", has timeseries="
- + response.getTimeSeriesCount()
- + " stopwatch elapsed "
- + stopwatch.elapsed(TimeUnit.MINUTES));
- while (response.getTimeSeriesCount() == 0 && stopwatch.elapsed(TimeUnit.MINUTES) < 10) {
+ while (response.getTimeSeriesCount() == 0
+ && metricsPollingStopwatch.elapsed(TimeUnit.MINUTES) < 10) {
+ logger.log(
+ Level.INFO,
+ "Checking for view "
+ + view
+ + ", has timeseries="
+ + response.getTimeSeriesCount()
+ + " stopwatch elapsed "
+ + metricsPollingStopwatch.elapsed(TimeUnit.MINUTES));
// Call listTimeSeries every minute
Thread.sleep(Duration.ofMinutes(1).toMillis());
response = metricClient.listTimeSeriesCallable().call(request);
@@ -176,5 +348,64 @@ private void verifyMetricsArePublished(
assertWithMessage("View " + view + " didn't return any data.")
.that(response.getTimeSeriesCount())
.isGreaterThan(0);
+
+ return response;
+ }
+
+ private void verifyMetricsWithMetricsReader(
+ ListTimeSeriesResponse response, MetricData dataFromReader) {
+ for (TimeSeries ts : response.getTimeSeriesList()) {
+ Map attributesMap =
+ ImmutableMap.builder()
+ .putAll(ts.getResource().getLabelsMap())
+ .putAll(ts.getMetric().getLabelsMap())
+ .build();
+ AttributesBuilder attributesBuilder = Attributes.builder();
+ String streamingKey = BuiltinMetricsConstants.STREAMING_KEY.getKey();
+ attributesMap.forEach(
+ (k, v) -> {
+ if (!k.equals(streamingKey)) {
+ attributesBuilder.put(k, v);
+ }
+ });
+ if (attributesMap.containsKey(streamingKey)) {
+ attributesBuilder.put(streamingKey, Boolean.parseBoolean(attributesMap.get(streamingKey)));
+ }
+ Attributes attributes = attributesBuilder.build();
+ verifyAttributes(dataFromReader, attributes);
+ long expectedValue = getAggregatedValue(dataFromReader, attributes);
+ Timestamp startTime = getStartTimeSeconds(dataFromReader, attributes);
+ assertThat(startTime.getSeconds()).isGreaterThan(0);
+ List point =
+ ts.getPointsList().stream()
+ .filter(
+ p ->
+ Timestamps.compare(p.getInterval().getStartTime(), startTime) >= 0
+ && Timestamps.compare(
+ p.getInterval().getStartTime(),
+ Timestamps.add(
+ startTime,
+ com.google.protobuf.Duration.newBuilder()
+ .setSeconds(60)
+ .build()))
+ < 0)
+ .collect(Collectors.toList());
+ if (point.size() > 0) {
+ long actualValue = (long) point.get(0).getValue().getDistributionValue().getMean();
+ assertWithMessage(
+ "actual value does not match expected value, actual value "
+ + actualValue
+ + " expected value "
+ + expectedValue
+ + " actual start time "
+ + point.get(0).getInterval().getStartTime()
+ + " expected start time "
+ + startTime)
+ .that(actualValue)
+ .isIn(
+ Range.range(
+ expectedValue - 1, BoundType.CLOSED, expectedValue + 1, BoundType.CLOSED));
+ }
+ }
}
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java
new file mode 100644
index 0000000000..56f6bfa476
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.it;
+
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.common.truth.Correspondence;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+
+public class MetricsITUtils {
+
+ static final Correspondence METRIC_DATA_NAME_CONTAINS =
+ Correspondence.from((md, s) -> md.getName().contains(s), "contains name");
+
+ static final Correspondence POINT_DATA_CLUSTER_ID_CONTAINS =
+ Correspondence.from(
+ (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY).contains(s),
+ "contains attributes");
+
+ static final Correspondence POINT_DATA_ZONE_ID_CONTAINS =
+ Correspondence.from(
+ (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY).contains(s),
+ "contains attributes");
+}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
index b0e12d5ade..84ab24f1c8 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
@@ -15,37 +15,76 @@
*/
package com.google.cloud.bigtable.data.v2.it;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.TruthJUnit.assume;
import com.google.api.core.ApiFuture;
import com.google.api.gax.rpc.NotFoundException;
import com.google.cloud.bigtable.admin.v2.models.Cluster;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.models.Query;
import com.google.cloud.bigtable.data.v2.models.Row;
-import com.google.cloud.bigtable.stats.BuiltinViews;
-import com.google.cloud.bigtable.stats.StatsWrapper;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
import com.google.common.collect.Lists;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
+import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
-import org.junit.BeforeClass;
+import java.util.stream.Collectors;
+import org.junit.After;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
public class StreamingMetricsMetadataIT {
@ClassRule public static TestEnvRule testEnvRule = new TestEnvRule();
- @BeforeClass
- public static void setUpClass() {
+ private BigtableDataClient client;
+ private InMemoryMetricReader metricReader;
+
+ @Before
+ public void setup() throws IOException {
assume()
.withMessage("StreamingMetricsMetadataIT is not supported on Emulator")
.that(testEnvRule.env())
.isNotInstanceOf(EmulatorEnv.class);
- BuiltinViews.registerBigtableBuiltinViews();
+
+ BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder();
+
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+ BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider);
+ OpenTelemetry openTelemetry =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
+ settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry));
+ client = BigtableDataClient.create(settings.build());
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ if (client != null) {
+ client.close();
+ }
}
@Test
@@ -54,7 +93,7 @@ public void testSuccess() throws Exception {
String uniqueKey = prefix + "-read";
Query query = Query.create(testEnvRule.env().getTableId()).rowKey(uniqueKey);
- ArrayList rows = Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query));
+ ArrayList rows = Lists.newArrayList(client.readRows(query));
ApiFuture> clustersFuture =
testEnvRule
@@ -64,27 +103,73 @@ public void testSuccess() throws Exception {
List clusters = clustersFuture.get(1, TimeUnit.MINUTES);
- // give opencensus some time to populate view data
- Thread.sleep(100);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ List metrics =
+ metricReader.collectAllMetrics().stream()
+ .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
+ .collect(Collectors.toList());
+
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metrics).hasSize(1);
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains(clusters.get(0).getZone());
- assertThat(tagValueStrings).contains(clusters.get(0).getId());
+ MetricData metricData = metrics.get(0);
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
+
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains(clusters.get(0).getId());
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS)
+ .contains(clusters.get(0).getZone());
+ assertThat(clusterAttributes).contains(clusters.get(0).getId());
+ assertThat(zoneAttributes).contains(clusters.get(0).getZone());
}
@Test
- public void testFailure() throws InterruptedException {
+ public void testFailure() {
Query query = Query.create("non-exist-table");
try {
- Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query));
+ Lists.newArrayList(client.readRows(query));
} catch (NotFoundException e) {
}
- // give opencensus some time to populate view data
- Thread.sleep(100);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ List metrics =
+ metricReader.collectAllMetrics().stream()
+ .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
+ .collect(Collectors.toList());
+
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metrics).hasSize(1);
+
+ MetricData metricData = metrics.get(0);
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains("unspecified");
- assertThat(tagValueStrings).contains("global");
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains("unspecified");
+ assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global");
+ assertThat(clusterAttributes).contains("unspecified");
+ assertThat(zoneAttributes).contains("global");
}
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
index aa2a4317fc..42adb8ea6e 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
@@ -15,35 +15,76 @@
*/
package com.google.cloud.bigtable.data.v2.it;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.TruthJUnit.assume;
import com.google.api.core.ApiFuture;
import com.google.api.gax.rpc.NotFoundException;
import com.google.cloud.bigtable.admin.v2.models.Cluster;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
-import com.google.cloud.bigtable.stats.BuiltinViews;
-import com.google.cloud.bigtable.stats.StatsWrapper;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
-import org.junit.BeforeClass;
+import java.util.stream.Collectors;
+import org.junit.After;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
public class UnaryMetricsMetadataIT {
@ClassRule public static TestEnvRule testEnvRule = new TestEnvRule();
- @BeforeClass
- public static void setUpClass() {
+ private BigtableDataClient client;
+ private InMemoryMetricReader metricReader;
+
+ @Before
+ public void setup() throws IOException {
assume()
.withMessage("UnaryMetricsMetadataIT is not supported on Emulator")
.that(testEnvRule.env())
.isNotInstanceOf(EmulatorEnv.class);
- BuiltinViews.registerBigtableBuiltinViews();
+
+ BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder();
+
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+ BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider);
+ OpenTelemetry openTelemetry =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
+ settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry));
+
+ client = BigtableDataClient.create(settings.build());
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ if (client != null) {
+ client.close();
+ }
}
@Test
@@ -52,9 +93,7 @@ public void testSuccess() throws Exception {
String familyId = testEnvRule.env().getFamilyId();
ApiFuture future =
- testEnvRule
- .env()
- .getDataClient()
+ client
.mutateRowCallable()
.futureCall(
RowMutation.create(testEnvRule.env().getTableId(), rowKey)
@@ -69,18 +108,36 @@ public void testSuccess() throws Exception {
.listClustersAsync(testEnvRule.env().getInstanceId());
List clusters = clustersFuture.get(1, TimeUnit.MINUTES);
- // give opencensus some time to populate view data
- for (int i = 0; i < 10; i++) {
- if (StatsWrapper.getOperationLatencyViewTagValueStrings()
- .contains(clusters.get(0).getZone())) {
- break;
- }
- Thread.sleep(100);
- }
+ Collection allMetricData = metricReader.collectAllMetrics();
+ List metrics =
+ allMetricData.stream()
+ .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
+ .collect(Collectors.toList());
+
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metrics).hasSize(1);
+
+ MetricData metricData = metrics.get(0);
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains(clusters.get(0).getZone());
- assertThat(tagValueStrings).contains(clusters.get(0).getId());
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains(clusters.get(0).getId());
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS)
+ .contains(clusters.get(0).getZone());
+ assertThat(clusterAttributes).contains(clusters.get(0).getId());
+ assertThat(zoneAttributes).contains(clusters.get(0).getZone());
}
@Test
@@ -89,9 +146,7 @@ public void testFailure() throws Exception {
String familyId = testEnvRule.env().getFamilyId();
ApiFuture future =
- testEnvRule
- .env()
- .getDataClient()
+ client
.mutateRowCallable()
.futureCall(
RowMutation.create("non-exist-table", rowKey).setCell(familyId, "q", "myVal"));
@@ -106,16 +161,39 @@ public void testFailure() throws Exception {
}
}
- // give opencensus some time to populate view data
- for (int i = 0; i < 10; i++) {
- if (StatsWrapper.getOperationLatencyViewTagValueStrings().contains("unspecified")) {
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = null;
+ for (MetricData md : allMetricData) {
+ if (md.getName()
+ .equals(
+ BuiltinMetricsConstants.METER_NAME
+ + BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME)) {
+ metricData = md;
break;
}
- Thread.sleep(100);
}
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains("unspecified");
- assertThat(tagValueStrings).contains("global");
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME);
+ assertThat(metricData).isNotNull();
+
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains("unspecified");
+ assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global");
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
+
+ assertThat(clusterAttributes).contains("unspecified");
+ assertThat(zoneAttributes).contains("global");
}
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
index 79cbccb0ac..290fcc321f 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
@@ -885,6 +885,7 @@ public void enableRetryInfoFalseValueTest() throws IOException {
"generateInitialChangeStreamPartitionsSettings",
"readChangeStreamSettings",
"pingAndWarmSettings",
+ "metricsProvider",
};
@Test
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java
index 1975d0da25..abbf46c468 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java
@@ -18,12 +18,9 @@
import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertThrows;
-import com.google.api.gax.core.NoCredentialsProvider;
import com.google.api.gax.grpc.GrpcStatusCode;
-import com.google.api.gax.grpc.GrpcTransportChannel;
import com.google.api.gax.rpc.ApiException;
import com.google.api.gax.rpc.ErrorDetails;
-import com.google.api.gax.rpc.FixedTransportChannelProvider;
import com.google.api.gax.rpc.InternalException;
import com.google.api.gax.rpc.UnavailableException;
import com.google.bigtable.v2.BigtableGrpc;
@@ -45,6 +42,7 @@
import com.google.bigtable.v2.SampleRowKeysResponse;
import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
+import com.google.cloud.bigtable.data.v2.FakeServiceBuilder;
import com.google.cloud.bigtable.data.v2.models.BulkMutation;
import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation;
import com.google.cloud.bigtable.data.v2.models.Filters;
@@ -55,22 +53,31 @@
import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
+import com.google.cloud.bigtable.data.v2.models.TableId;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Queues;
import com.google.protobuf.Any;
import com.google.rpc.RetryInfo;
+import io.grpc.ForwardingServerCall;
import io.grpc.Metadata;
+import io.grpc.MethodDescriptor;
+import io.grpc.Server;
+import io.grpc.ServerCall;
+import io.grpc.ServerCallHandler;
+import io.grpc.ServerInterceptor;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.grpc.stub.StreamObserver;
-import io.grpc.testing.GrpcServerRule;
import java.io.IOException;
import java.time.Duration;
+import java.util.HashSet;
import java.util.Queue;
+import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import org.junit.After;
import org.junit.Before;
-import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@@ -78,12 +85,13 @@
@RunWith(JUnit4.class)
public class RetryInfoTest {
- @Rule public GrpcServerRule serverRule = new GrpcServerRule();
-
private static final Metadata.Key ERROR_DETAILS_KEY =
Metadata.Key.of("grpc-status-details-bin", Metadata.BINARY_BYTE_MARSHALLER);
+ private final Set methods = new HashSet<>();
+
private FakeBigtableService service;
+ private Server server;
private BigtableDataClient client;
private BigtableDataSettings.Builder settings;
@@ -94,29 +102,111 @@ public class RetryInfoTest {
@Before
public void setUp() throws IOException {
service = new FakeBigtableService();
- serverRule.getServiceRegistry().addService(service);
+
+ ServerInterceptor serverInterceptor =
+ new ServerInterceptor() {
+ @Override
+ public ServerCall.Listener interceptCall(
+ ServerCall serverCall,
+ Metadata metadata,
+ ServerCallHandler serverCallHandler) {
+ return serverCallHandler.startCall(
+ new ForwardingServerCall.SimpleForwardingServerCall(serverCall) {
+ @Override
+ public void close(Status status, Metadata trailers) {
+ if (trailers.containsKey(ERROR_DETAILS_KEY)) {
+ methods.add(serverCall.getMethodDescriptor().getBareMethodName());
+ }
+ super.close(status, trailers);
+ }
+ },
+ metadata);
+ }
+ };
+ server = FakeServiceBuilder.create(service).intercept(serverInterceptor).start();
settings =
- BigtableDataSettings.newBuilder()
+ BigtableDataSettings.newBuilderForEmulator(server.getPort())
.setProjectId("fake-project")
- .setInstanceId("fake-instance")
- .setCredentialsProvider(NoCredentialsProvider.create());
-
- settings
- .stubSettings()
- .setTransportChannelProvider(
- FixedTransportChannelProvider.create(
- GrpcTransportChannel.create(serverRule.getChannel())))
- // channel priming doesn't work with FixedTransportChannelProvider. Disable it for the test
- .setRefreshingChannel(false)
- .build();
+ .setInstanceId("fake-instance");
this.client = BigtableDataClient.create(settings.build());
}
+ @After
+ public void tearDown() {
+ if (client != null) {
+ client.close();
+ }
+ if (server != null) {
+ server.shutdown();
+ }
+ }
+
@Test
- public void testReadRow() {
- verifyRetryInfoIsUsed(() -> client.readRow("table", "row"), true);
+ public void testAllMethods() {
+    // Verify retry info is handled correctly for all the methods in the data API.
+ verifyRetryInfoIsUsed(() -> client.readRow(TableId.of("table"), "row"), true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () -> client.readRows(Query.create(TableId.of("table"))).iterator().hasNext(), true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () ->
+ client.bulkMutateRows(
+ BulkMutation.create(TableId.of("fake-table"))
+ .add(RowMutationEntry.create("row-key-1").setCell("cf", "q", "v"))),
+ true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () ->
+ client.mutateRow(
+ RowMutation.create(TableId.of("fake-table"), "key").setCell("cf", "q", "v")),
+ true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(() -> client.sampleRowKeys(TableId.of("table")), true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () ->
+ client.checkAndMutateRow(
+ ConditionalRowMutation.create("table", "key")
+ .condition(Filters.FILTERS.value().regex("old-value"))
+ .then(Mutation.create().setCell("cf", "q", "v"))),
+ true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () ->
+ client.readModifyWriteRow(
+ ReadModifyWriteRow.create("table", "row").append("cf", "q", "v")),
+ true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () -> client.readChangeStream(ReadChangeStreamQuery.create("table")).iterator().hasNext(),
+ true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () -> client.generateInitialChangeStreamPartitions("table").iterator().hasNext(), true);
+
+    // Verify that every data API method is either tested above or explicitly excluded
+    // below. This is enforced by introspecting the gRPC method descriptors of the
+    // Bigtable service, so any newly added RPC will fail this assertion until handled.
+ Set expected =
+ BigtableGrpc.getServiceDescriptor().getMethods().stream()
+ .map(MethodDescriptor::getBareMethodName)
+ .collect(Collectors.toSet());
+
+    // PingAndWarm does not support RetryInfo; mark it as covered so the check passes.
+ methods.add("PingAndWarm");
+
+ assertThat(methods).containsExactlyElementsIn(expected);
}
@Test
@@ -147,11 +237,6 @@ public void testReadRowServerNotReturningRetryInfoClientDisabledHandling() throw
}
}
- @Test
- public void testReadRows() {
- verifyRetryInfoIsUsed(() -> client.readRows(Query.create("table")).iterator().hasNext(), true);
- }
-
@Test
public void testReadRowsNonRetraybleErrorWithRetryInfo() {
verifyRetryInfoIsUsed(() -> client.readRows(Query.create("table")).iterator().hasNext(), false);
@@ -181,16 +266,6 @@ public void testReadRowsServerNotReturningRetryInfoClientDisabledHandling() thro
}
}
- @Test
- public void testMutateRows() {
- verifyRetryInfoIsUsed(
- () ->
- client.bulkMutateRows(
- BulkMutation.create("fake-table")
- .add(RowMutationEntry.create("row-key-1").setCell("cf", "q", "v"))),
- true);
- }
-
@Test
public void testMutateRowsNonRetryableErrorWithRetryInfo() {
verifyRetryInfoIsUsed(
@@ -238,12 +313,6 @@ public void testMutateRowsServerNotReturningRetryInfoClientDisabledHandling() th
}
}
- @Test
- public void testMutateRow() {
- verifyRetryInfoIsUsed(
- () -> client.mutateRow(RowMutation.create("table", "key").setCell("cf", "q", "v")), true);
- }
-
@Test
public void testMutateRowNonRetryableErrorWithRetryInfo() {
verifyRetryInfoIsUsed(
@@ -278,11 +347,6 @@ public void testMutateRowServerNotReturningRetryInfoClientDisabledHandling() thr
}
}
- @Test
- public void testSampleRowKeys() {
- verifyRetryInfoIsUsed(() -> client.sampleRowKeys("table"), true);
- }
-
@Test
public void testSampleRowKeysNonRetryableErrorWithRetryInfo() {
verifyRetryInfoIsUsed(() -> client.sampleRowKeys("table"), false);
@@ -312,17 +376,6 @@ public void testSampleRowKeysServerNotReturningRetryInfoClientDisabledHandling()
}
}
- @Test
- public void testCheckAndMutateRow() {
- verifyRetryInfoIsUsed(
- () ->
- client.checkAndMutateRow(
- ConditionalRowMutation.create("table", "key")
- .condition(Filters.FILTERS.value().regex("old-value"))
- .then(Mutation.create().setCell("cf", "q", "v"))),
- true);
- }
-
@Test
public void testCheckAndMutateDisableRetryInfo() throws IOException {
settings.stubSettings().setEnableRetryInfo(false);
@@ -368,15 +421,6 @@ public void testCheckAndMutateServerNotReturningRetryInfoClientDisabledHandling(
}
}
- @Test
- public void testReadModifyWrite() {
- verifyRetryInfoIsUsed(
- () ->
- client.readModifyWriteRow(
- ReadModifyWriteRow.create("table", "row").append("cf", "q", "v")),
- true);
- }
-
@Test
public void testReadModifyWriteDisableRetryInfo() throws IOException {
settings.stubSettings().setEnableRetryInfo(false);
@@ -414,13 +458,6 @@ public void testReadModifyWriteNotReturningRetryInfoClientDisabledHandling() thr
}
}
- @Test
- public void testReadChangeStream() {
- verifyRetryInfoIsUsed(
- () -> client.readChangeStream(ReadChangeStreamQuery.create("table")).iterator().hasNext(),
- true);
- }
-
@Test
public void testReadChangeStreamNonRetryableErrorWithRetryInfo() {
verifyRetryInfoIsUsed(
@@ -465,12 +502,6 @@ public void testReadChangeStreamNotReturningRetryInfoClientDisabledHandling() th
}
}
- @Test
- public void testGenerateInitialChangeStreamPartition() {
- verifyRetryInfoIsUsed(
- () -> client.generateInitialChangeStreamPartitions("table").iterator().hasNext(), true);
- }
-
@Test
public void testGenerateInitialChangeStreamPartitionNonRetryableError() {
verifyRetryInfoIsUsed(
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java
new file mode 100644
index 0000000000..a0b9c058dc
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
+import static com.google.common.truth.Truth.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.google.api.Distribution;
+import com.google.api.MonitoredResource;
+import com.google.api.core.ApiFuture;
+import com.google.api.core.ApiFutures;
+import com.google.api.gax.rpc.UnaryCallable;
+import com.google.cloud.monitoring.v3.MetricServiceClient;
+import com.google.cloud.monitoring.v3.stub.MetricServiceStub;
+import com.google.common.collect.ImmutableList;
+import com.google.monitoring.v3.CreateTimeSeriesRequest;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.protobuf.Empty;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.common.InstrumentationScopeInfo;
+import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.LongPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramPointData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableSumData;
+import io.opentelemetry.sdk.resources.Resource;
+import java.util.Arrays;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnit;
+import org.mockito.junit.MockitoRule;
+
+public class BigtableCloudMonitoringExporterTest {
+ private static final String projectId = "fake-project";
+ private static final String instanceId = "fake-instance";
+ private static final String appProfileId = "default";
+ private static final String tableId = "fake-table";
+ private static final String zone = "us-east-1";
+ private static final String cluster = "cluster-1";
+
+ private static final String clientName = "fake-client-name";
+ private static final String taskId = "fake-task-id";
+
+ @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule();
+
+ @Mock private MetricServiceStub mockMetricServiceStub;
+ private MetricServiceClient fakeMetricServiceClient;
+ private BigtableCloudMonitoringExporter exporter;
+
+ private Attributes attributes;
+ private Resource resource;
+ private InstrumentationScopeInfo scope;
+
+ @Before
+ public void setUp() {
+ fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub);
+
+ exporter =
+ new BigtableCloudMonitoringExporter(
+ projectId, fakeMetricServiceClient, /* applicationResource= */ null, taskId);
+
+ attributes =
+ Attributes.builder()
+ .put(BIGTABLE_PROJECT_ID_KEY, projectId)
+ .put(INSTANCE_ID_KEY, instanceId)
+ .put(TABLE_ID_KEY, tableId)
+ .put(CLUSTER_ID_KEY, cluster)
+ .put(ZONE_ID_KEY, zone)
+ .put(APP_PROFILE_KEY, appProfileId)
+ .build();
+
+ resource = Resource.create(Attributes.empty());
+
+ scope = InstrumentationScopeInfo.create(BuiltinMetricsConstants.METER_NAME);
+ }
+
+ @After
+ public void tearDown() {}
+
+ @Test
+ public void testExportingSumData() {
+ ArgumentCaptor argumentCaptor =
+ ArgumentCaptor.forClass(CreateTimeSeriesRequest.class);
+
+ UnaryCallable mockCallable = mock(UnaryCallable.class);
+ when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable);
+ ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance());
+ when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future);
+
+ long fakeValue = 11L;
+
+ long startEpoch = 10;
+ long endEpoch = 15;
+ LongPointData longPointData =
+ ImmutableLongPointData.create(startEpoch, endEpoch, attributes, fakeValue);
+
+ MetricData longData =
+ ImmutableMetricData.createLongSum(
+ resource,
+ scope,
+ "bigtable.googleapis.com/internal/client/retry_count",
+ "description",
+ "1",
+ ImmutableSumData.create(
+ true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData)));
+
+ exporter.export(Arrays.asList(longData));
+
+ CreateTimeSeriesRequest request = argumentCaptor.getValue();
+
+ assertThat(request.getTimeSeriesList()).hasSize(1);
+
+ TimeSeries timeSeries = request.getTimeSeriesList().get(0);
+
+ assertThat(timeSeries.getResource().getLabelsMap())
+ .containsExactly(
+ BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
+ INSTANCE_ID_KEY.getKey(), instanceId,
+ TABLE_ID_KEY.getKey(), tableId,
+ CLUSTER_ID_KEY.getKey(), cluster,
+ ZONE_ID_KEY.getKey(), zone);
+
+ assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
+ assertThat(timeSeries.getMetric().getLabelsMap())
+ .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId);
+ assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(fakeValue);
+ assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos())
+ .isEqualTo(startEpoch);
+ assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch);
+ }
+
+ @Test
+ public void testExportingHistogramData() {
+ ArgumentCaptor argumentCaptor =
+ ArgumentCaptor.forClass(CreateTimeSeriesRequest.class);
+
+ UnaryCallable mockCallable = mock(UnaryCallable.class);
+ when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable);
+ ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance());
+ when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future);
+
+ long startEpoch = 10;
+ long endEpoch = 15;
+ HistogramPointData histogramPointData =
+ ImmutableHistogramPointData.create(
+ startEpoch,
+ endEpoch,
+ attributes,
+ 3d,
+ true,
+ 1d, // min
+ true,
+ 2d, // max
+ Arrays.asList(1.0),
+ Arrays.asList(1L, 2L));
+
+ MetricData histogramData =
+ ImmutableMetricData.createDoubleHistogram(
+ resource,
+ scope,
+ "bigtable.googleapis.com/internal/client/operation_latencies",
+ "description",
+ "ms",
+ ImmutableHistogramData.create(
+ AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData)));
+
+ exporter.export(Arrays.asList(histogramData));
+
+ CreateTimeSeriesRequest request = argumentCaptor.getValue();
+
+ assertThat(request.getTimeSeriesList()).hasSize(1);
+
+ TimeSeries timeSeries = request.getTimeSeriesList().get(0);
+
+ assertThat(timeSeries.getResource().getLabelsMap())
+ .containsExactly(
+ BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
+ INSTANCE_ID_KEY.getKey(), instanceId,
+ TABLE_ID_KEY.getKey(), tableId,
+ CLUSTER_ID_KEY.getKey(), cluster,
+ ZONE_ID_KEY.getKey(), zone);
+
+ assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
+ assertThat(timeSeries.getMetric().getLabelsMap())
+ .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId);
+ Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue();
+ assertThat(distribution.getCount()).isEqualTo(3);
+ assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos())
+ .isEqualTo(startEpoch);
+ assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch);
+ }
+
+ @Test
+ public void testTimeSeriesForMetricWithGceOrGkeResource() {
+ String gceProjectId = "fake-gce-project";
+ BigtableCloudMonitoringExporter exporter =
+ new BigtableCloudMonitoringExporter(
+ projectId,
+ fakeMetricServiceClient,
+ MonitoredResource.newBuilder()
+ .setType("gce-instance")
+ .putLabels("some-gce-key", "some-gce-value")
+ .putLabels("project_id", gceProjectId)
+ .build(),
+ taskId);
+ ArgumentCaptor argumentCaptor =
+ ArgumentCaptor.forClass(CreateTimeSeriesRequest.class);
+
+ UnaryCallable mockCallable = mock(UnaryCallable.class);
+ when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable);
+ ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance());
+ when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future);
+
+ long startEpoch = 10;
+ long endEpoch = 15;
+ HistogramPointData histogramPointData =
+ ImmutableHistogramPointData.create(
+ startEpoch,
+ endEpoch,
+ Attributes.of(
+ BIGTABLE_PROJECT_ID_KEY,
+ projectId,
+ INSTANCE_ID_KEY,
+ instanceId,
+ APP_PROFILE_KEY,
+ appProfileId,
+ CLIENT_NAME_KEY,
+ clientName),
+ 3d,
+ true,
+ 1d, // min
+ true,
+ 2d, // max
+ Arrays.asList(1.0),
+ Arrays.asList(1L, 2L));
+
+ MetricData histogramData =
+ ImmutableMetricData.createDoubleHistogram(
+ resource,
+ scope,
+ "bigtable.googleapis.com/internal/client/per_connection_error_count",
+ "description",
+ "ms",
+ ImmutableHistogramData.create(
+ AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData)));
+
+ exporter.export(Arrays.asList(histogramData));
+
+ CreateTimeSeriesRequest request = argumentCaptor.getValue();
+
+ assertThat(request.getName()).isEqualTo("projects/" + gceProjectId);
+ assertThat(request.getTimeSeriesList()).hasSize(1);
+
+ com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0);
+
+ assertThat(timeSeries.getResource().getLabelsMap())
+ .containsExactly("some-gce-key", "some-gce-value", "project_id", gceProjectId);
+
+ assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5);
+ assertThat(timeSeries.getMetric().getLabelsMap())
+ .containsAtLeast(
+ BIGTABLE_PROJECT_ID_KEY.getKey(),
+ projectId,
+ INSTANCE_ID_KEY.getKey(),
+ instanceId,
+ APP_PROFILE_KEY.getKey(),
+ appProfileId,
+ CLIENT_NAME_KEY.getKey(),
+ clientName,
+ CLIENT_UID_KEY.getKey(),
+ taskId);
+ }
+
+ private static class FakeMetricServiceClient extends MetricServiceClient {
+
+ protected FakeMetricServiceClient(MetricServiceStub stub) {
+ super(stub);
+ }
+ }
+}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
index 5d16b623fd..a12dd3cfbd 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
@@ -45,7 +45,6 @@
import com.google.cloud.bigtable.data.v2.models.SampleRowKeysRequest;
import com.google.cloud.bigtable.data.v2.models.TableId;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
-import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
import com.google.common.collect.ImmutableMap;
import io.grpc.ForwardingServerCall.SimpleForwardingServerCall;
import io.grpc.Metadata;
@@ -126,16 +125,21 @@ public void sendHeaders(Metadata headers) {
.setInstanceId(INSTANCE_ID)
.setAppProfileId(APP_PROFILE_ID)
.build();
- EnhancedBigtableStubSettings stubSettings =
- settings
- .getStubSettings()
+
+ ClientContext clientContext =
+ EnhancedBigtableStub.createClientContext(settings.getStubSettings());
+ clientContext =
+ clientContext
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
- settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder()))
+ settings.getStubSettings(),
+ Tags.getTagger(),
+ localStats.getStatsRecorder(),
+ null))
.build();
- attempts = stubSettings.readRowsSettings().getRetrySettings().getMaxAttempts();
- stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings));
+ attempts = settings.getStubSettings().readRowsSettings().getRetrySettings().getMaxAttempts();
+ stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext);
// Create another server without injecting the server-timing header and another stub that
// connects to it.
@@ -147,18 +151,21 @@ public void sendHeaders(Metadata headers) {
.setInstanceId(INSTANCE_ID)
.setAppProfileId(APP_PROFILE_ID)
.build();
- EnhancedBigtableStubSettings noHeaderStubSettings =
- noHeaderSettings
- .getStubSettings()
+
+ ClientContext noHeaderClientContext =
+ EnhancedBigtableStub.createClientContext(noHeaderSettings.getStubSettings());
+ noHeaderClientContext =
+ noHeaderClientContext
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
noHeaderSettings.getStubSettings(),
Tags.getTagger(),
- localStats.getStatsRecorder()))
+ localStats.getStatsRecorder(),
+ null))
.build();
noHeaderStub =
- new EnhancedBigtableStub(noHeaderStubSettings, ClientContext.create(noHeaderStubSettings));
+ new EnhancedBigtableStub(noHeaderSettings.getStubSettings(), noHeaderClientContext);
}
@After
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java
new file mode 100644
index 0000000000..09b7e1f663
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.api.core.InternalApi;
+import com.google.protobuf.Timestamp;
+import com.google.protobuf.util.Timestamps;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.LongPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.junit.Assert;
+
+@InternalApi
+public class BuiltinMetricsTestUtils {
+
+ private BuiltinMetricsTestUtils() {}
+
+ public static MetricData getMetricData(Collection allMetricData, String metricName) {
+ List metricDataList =
+ allMetricData.stream()
+ .filter(md -> md.getName().equals(BuiltinMetricsConstants.METER_NAME + metricName))
+ .collect(Collectors.toList());
+ if (metricDataList.size() == 0) {
+ allMetricData.stream().forEach(md -> System.out.println(md.getName()));
+ }
+ assertThat(metricDataList.size()).isEqualTo(1);
+
+ return metricDataList.get(0);
+ }
+
+ public static long getAggregatedValue(MetricData metricData, Attributes attributes) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ HistogramPointData hd =
+ metricData.getHistogramData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return (long) hd.getSum() / hd.getCount();
+ case LONG_SUM:
+ LongPointData ld =
+ metricData.getLongSumData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return ld.getValue();
+ default:
+ return 0;
+ }
+ }
+
+ public static Timestamp getStartTimeSeconds(MetricData metricData, Attributes attributes) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ HistogramPointData hd =
+ metricData.getHistogramData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return Timestamps.fromNanos(hd.getStartEpochNanos());
+ case LONG_SUM:
+ LongPointData ld =
+ metricData.getLongSumData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return Timestamps.fromNanos(ld.getStartEpochNanos());
+ default:
+ return Timestamp.getDefaultInstance();
+ }
+ }
+
+ public static void verifyAttributes(MetricData metricData, Attributes attributes) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ List hd =
+ metricData.getHistogramData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList());
+ assertThat(hd).isNotEmpty();
+ break;
+ case LONG_SUM:
+ List ld =
+ metricData.getLongSumData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList());
+ assertThat(ld).isNotEmpty();
+ break;
+ default:
+ Assert.fail("Unexpected type");
+ }
+ }
+}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
index 06b923cad3..2dd4bcabb3 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
@@ -15,14 +15,24 @@
*/
package com.google.cloud.bigtable.data.v2.stub.metrics;
-import static com.google.api.gax.tracing.ApiTracerFactory.OperationType;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes;
import static com.google.common.truth.Truth.assertThat;
-import static org.junit.Assert.assertThrows;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
import com.google.api.client.util.Lists;
import com.google.api.core.ApiFunction;
@@ -36,7 +46,6 @@
import com.google.api.gax.rpc.NotFoundException;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.StreamController;
-import com.google.api.gax.tracing.SpanName;
import com.google.bigtable.v2.BigtableGrpc;
import com.google.bigtable.v2.MutateRowRequest;
import com.google.bigtable.v2.MutateRowResponse;
@@ -45,6 +54,7 @@
import com.google.bigtable.v2.ReadRowsRequest;
import com.google.bigtable.v2.ReadRowsResponse;
import com.google.bigtable.v2.ResponseParams;
+import com.google.cloud.bigtable.Version;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.FakeServiceBuilder;
import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId;
@@ -52,9 +62,9 @@
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
+import com.google.cloud.bigtable.data.v2.models.TableId;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapper;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Range;
import com.google.protobuf.ByteString;
@@ -77,11 +87,21 @@
import io.grpc.StatusRuntimeException;
import io.grpc.stub.ServerCallStreamObserver;
import io.grpc.stub.StreamObserver;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.View;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
import java.nio.charset.Charset;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -92,12 +112,8 @@
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
-import org.mockito.Mock;
import org.mockito.junit.MockitoJUnit;
import org.mockito.junit.MockitoRule;
-import org.mockito.stubbing.Answer;
import org.threeten.bp.Duration;
@RunWith(JUnit4.class)
@@ -105,8 +121,8 @@ public class BuiltinMetricsTracerTest {
private static final String PROJECT_ID = "fake-project";
private static final String INSTANCE_ID = "fake-instance";
private static final String APP_PROFILE_ID = "default";
- private static final String TABLE_ID = "fake-table";
- private static final String AUTHORIZED_VIEW_ID = "fake-authorized-view";
+ private static final String TABLE = "fake-table";
+
private static final String BAD_TABLE_ID = "non-exist-table";
private static final String ZONE = "us-west-1";
private static final String CLUSTER = "cluster-0";
@@ -114,6 +130,7 @@ public class BuiltinMetricsTracerTest {
private static final long SERVER_LATENCY = 100;
private static final long APPLICATION_LATENCY = 200;
private static final long SLEEP_VARIABILITY = 15;
+ private static final String CLIENT_NAME = "java-bigtable/" + Version.VERSION;
private static final long CHANNEL_BLOCKING_LATENCY = 75;
@@ -124,18 +141,35 @@ public class BuiltinMetricsTracerTest {
private EnhancedBigtableStub stub;
- @Mock private BuiltinMetricsTracerFactory mockFactory;
- @Mock private StatsRecorderWrapper statsRecorderWrapper;
+ private int batchElementCount = 2;
- @Captor private ArgumentCaptor status;
- @Captor private ArgumentCaptor tableId;
- @Captor private ArgumentCaptor zone;
- @Captor private ArgumentCaptor cluster;
+ private Attributes baseAttributes;
- private int batchElementCount = 2;
+ private InMemoryMetricReader metricReader;
@Before
public void setUp() throws Exception {
+ metricReader = InMemoryMetricReader.create();
+
+ baseAttributes =
+ Attributes.builder()
+ .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, PROJECT_ID)
+ .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, INSTANCE_ID)
+ .put(BuiltinMetricsConstants.APP_PROFILE_KEY, APP_PROFILE_ID)
+ .build();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+
+ for (Map.Entry entry :
+ BuiltinMetricsConstants.getAllViews().entrySet()) {
+ meterProvider.registerView(entry.getKey(), entry.getValue());
+ }
+
+ OpenTelemetrySdk otel =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+ BuiltinMetricsTracerFactory factory = BuiltinMetricsTracerFactory.create(otel, baseAttributes);
+
+ // Add an interceptor to add server-timing in headers
ServerInterceptor trailersInterceptor =
new ServerInterceptor() {
@@ -216,7 +250,8 @@ public void sendMessage(ReqT message) {
.setMaxOutstandingRequestBytes(1001L)
.build())
.build());
- stubSettingsBuilder.setTracerFactory(mockFactory);
+
+ stubSettingsBuilder.setTracerFactory(factory);
InstantiatingGrpcChannelProvider.Builder channelProvider =
((InstantiatingGrpcChannelProvider) stubSettingsBuilder.getTransportChannelProvider())
@@ -247,117 +282,117 @@ public void tearDown() {
@Test
public void testReadRowsOperationLatencies() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
-
Stopwatch stopwatch = Stopwatch.createStarted();
- Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator());
+ Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE)).iterator());
long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- // verify record operation is only called once
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(STREAMING_KEY, true)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+
+ MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
- assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed));
- assertThat(status.getAllValues()).containsExactly("OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID);
- assertThat(zone.getAllValues()).containsExactly(ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER);
+ long value = getAggregatedValue(metricData, expectedAttributes);
+ assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed));
}
@Test
public void testReadRowsOperationLatenciesOnAuthorizedView() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
-
+ String authorizedViewId = "test-authorized-view-id";
Stopwatch stopwatch = Stopwatch.createStarted();
Lists.newArrayList(
- stub.readRowsCallable()
- .call(Query.create(AuthorizedViewId.of(TABLE_ID, AUTHORIZED_VIEW_ID)))
- .iterator());
+ stub.readRowsCallable().call(Query.create(AuthorizedViewId.of(TABLE, authorizedViewId))));
long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- // verify record operation is only called once
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(STREAMING_KEY, true)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed));
- assertThat(status.getAllValues()).containsExactly("OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID);
- assertThat(zone.getAllValues()).containsExactly(ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER);
+ Collection allMetricData = metricReader.collectAllMetrics();
+
+ MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ long value = getAggregatedValue(metricData, expectedAttributes);
+ assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed));
}
@Test
public void testGfeMetrics() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
- ArgumentCaptor gfeLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor gfeMissingHeaders = ArgumentCaptor.forClass(Long.class);
-
- Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID)));
-
- // Verify record attempt are called multiple times
- verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get()))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
-
- // The request was retried and gfe latency is only recorded in the retry attempt
- verify(statsRecorderWrapper).putGfeLatencies(gfeLatency.capture());
- assertThat(gfeLatency.getValue()).isEqualTo(FAKE_SERVER_TIMING);
-
- // The first time the request was retried, it'll increment missing header counter
- verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get()))
- .putGfeMissingHeaders(gfeMissingHeaders.capture());
- assertThat(gfeMissingHeaders.getAllValues()).containsExactly(1L, 0L);
-
- assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID);
- assertThat(zone.getAllValues()).containsExactly("global", ZONE);
- assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER);
+ Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE)));
+
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .build();
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+
+ MetricData serverLatenciesMetricData = getMetricData(allMetricData, SERVER_LATENCIES_NAME);
+
+ long serverLatencies = getAggregatedValue(serverLatenciesMetricData, expectedAttributes);
+ assertThat(serverLatencies).isEqualTo(FAKE_SERVER_TIMING);
+
+ MetricData connectivityErrorCountMetricData =
+ getMetricData(allMetricData, CONNECTIVITY_ERROR_COUNT_NAME);
+ Attributes expected1 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "UNAVAILABLE")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+ Attributes expected2 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+
+ verifyAttributes(connectivityErrorCountMetricData, expected1);
+ verifyAttributes(connectivityErrorCountMetricData, expected2);
+
+ assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected1)).isEqualTo(1);
+ assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected2)).isEqualTo(0);
}
@Test
public void testReadRowsApplicationLatencyWithAutoFlowControl() throws Exception {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
- ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
-
final SettableApiFuture future = SettableApiFuture.create();
final AtomicInteger counter = new AtomicInteger(0);
// For auto flow control, application latency is the time application spent in onResponse.
stub.readRowsCallable()
.call(
- Query.create(TABLE_ID),
+ Query.create(TABLE),
new ResponseObserver() {
@Override
public void onStart(StreamController streamController) {}
@@ -383,37 +418,38 @@ public void onComplete() {
});
future.get();
- verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture());
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
-
assertThat(counter.get()).isEqualTo(fakeService.getResponseCounter().get());
- // Thread.sleep might not sleep for the requested amount depending on the interrupt period
- // defined by the OS.
- // On linux this is ~1ms but on windows may be as high as 15-20ms.
- assertThat(applicationLatency.getValue())
- .isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get());
- assertThat(applicationLatency.getValue())
- .isAtMost(operationLatency.getValue() - SERVER_LATENCY);
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData applicationLatency =
+ getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME);
+
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .build();
+ long value = getAggregatedValue(applicationLatency, expectedAttributes);
+
+ assertThat(value).isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get());
+
+ MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ long operationLatencyValue =
+ getAggregatedValue(
+ operationLatency,
+ expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build());
+ assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY);
}
@Test
public void testReadRowsApplicationLatencyWithManualFlowControl() throws Exception {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
- ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
int counter = 0;
- Iterator rows = stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator();
+ Iterator rows = stub.readRowsCallable().call(Query.create(TABLE)).iterator();
while (rows.hasNext()) {
counter++;
@@ -421,148 +457,189 @@ public void testReadRowsApplicationLatencyWithManualFlowControl() throws Excepti
rows.next();
}
- verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture());
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData applicationLatency =
+ getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME);
+
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .build();
- // For manual flow control, the last application latency shouldn't count, because at that point
- // the server already sent back all the responses.
+ long value = getAggregatedValue(applicationLatency, expectedAttributes);
+ // For manual flow control, the last application latency shouldn't count, because at that
+ // point the server already sent back all the responses.
assertThat(counter).isEqualTo(fakeService.getResponseCounter().get());
- assertThat(applicationLatency.getValue())
- .isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY);
- assertThat(applicationLatency.getValue())
- .isAtMost(operationLatency.getValue() - SERVER_LATENCY);
+ assertThat(value).isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY);
+
+ MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ long operationLatencyValue =
+ getAggregatedValue(
+ operationLatency,
+ expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build());
+ assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY);
}
@Test
- public void testRetryCount() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "MutateRow"),
- statsRecorderWrapper));
-
- ArgumentCaptor retryCount = ArgumentCaptor.forClass(Integer.class);
-
+ public void testRetryCount() throws InterruptedException {
stub.mutateRowCallable()
- .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value"));
-
- // In TracedUnaryCallable, we create a future and add a TraceFinisher to the callback. Main
- // thread is blocked on waiting for the future to be completed. When onComplete is called on
- // the grpc thread, the future is completed, however we might not have enough time for
- // TraceFinisher to run. Add a 1 second time out to wait for the callback. This shouldn't have
- // any impact on production code.
- verify(statsRecorderWrapper, timeout(1000)).putRetryCount(retryCount.capture());
+ .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value"));
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, RETRY_COUNT_NAME);
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(STATUS_KEY, "OK")
+ .build();
- assertThat(retryCount.getValue()).isEqualTo(fakeService.getAttemptCounter().get() - 1);
+ long value = getAggregatedValue(metricData, expectedAttributes);
+ assertThat(value).isEqualTo(fakeService.getAttemptCounter().get() - 1);
}
@Test
public void testMutateRowAttemptsTagValues() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper));
-
stub.mutateRowCallable()
- .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value"));
-
- // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set
- // attempt succeeded and set the response which will call complete() in AbstractFuture which
- // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be
- // called after the mutateRow call is returned. So there's a race between when the call returns
- // and when the record() is called in onOperationCompletion().
- verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get()))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
- assertThat(zone.getAllValues()).containsExactly("global", "global", ZONE);
- assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", CLUSTER);
- assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "UNAVAILABLE", "OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID, TABLE_ID);
+ .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value"));
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected1 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "UNAVAILABLE")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
+
+ Attributes expected2 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
+
+ verifyAttributes(metricData, expected1);
+ verifyAttributes(metricData, expected2);
}
@Test
public void testMutateRowsPartialError() throws InterruptedException {
+ Batcher batcher = stub.newMutateRowsBatcher(TableId.of(TABLE), null);
int numMutations = 6;
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper));
-
- Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null);
for (int i = 0; i < numMutations; i++) {
String key = i % 2 == 0 ? "key" : "fail-key";
batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v"));
}
- assertThrows(BatchingException.class, () -> batcher.close());
-
- int expectedNumRequests = numMutations / batchElementCount;
- verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Assert.assertThrows(BatchingException.class, batcher::close);
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.MutateRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
- assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER);
- assertThat(status.getAllValues()).containsExactly("OK", "OK", "OK");
+ verifyAttributes(metricData, expected);
}
@Test
public void testMutateRowsRpcError() {
+ Batcher batcher =
+ stub.newMutateRowsBatcher(TableId.of(BAD_TABLE_ID), null);
int numMutations = 6;
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper));
-
- Batcher batcher = stub.newMutateRowsBatcher(BAD_TABLE_ID, null);
for (int i = 0; i < numMutations; i++) {
- batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v"));
+ String key = i % 2 == 0 ? "key" : "fail-key";
+ batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v"));
}
- assertThrows(BatchingException.class, () -> batcher.close());
-
- int expectedNumRequests = numMutations / batchElementCount;
- verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Assert.assertThrows(BatchingException.class, batcher::close);
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "NOT_FOUND")
+ .put(TABLE_ID_KEY, BAD_TABLE_ID)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.MutateRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
- assertThat(zone.getAllValues()).containsExactly("global", "global", "global");
- assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", "unspecified");
- assertThat(status.getAllValues()).containsExactly("NOT_FOUND", "NOT_FOUND", "NOT_FOUND");
+ verifyAttributes(metricData, expected);
}
@Test
public void testReadRowsAttemptsTagValues() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
Lists.newArrayList(stub.readRowsCallable().call(Query.create("fake-table")).iterator());
- // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set
- // attempt succeeded and set the response which will call complete() in AbstractFuture which
- // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be
- // called after the mutateRow call is returned. So there's a race between when the call returns
- // and when the record() is called in onOperationCompletion().
- verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get()))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
- assertThat(zone.getAllValues()).containsExactly("global", ZONE);
- assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER);
- assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK");
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected1 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "UNAVAILABLE")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, true)
+ .build();
+
+ Attributes expected2 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, true)
+ .build();
+
+ verifyAttributes(metricData, expected1);
+ verifyAttributes(metricData, expected2);
}
@Test
public void testBatchBlockingLatencies() throws InterruptedException {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper));
- try (Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null)) {
+ try (Batcher batcher = stub.newMutateRowsBatcher(TABLE, null)) {
for (int i = 0; i < 6; i++) {
batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v"));
}
@@ -571,86 +648,100 @@ public void testBatchBlockingLatencies() throws InterruptedException {
batcher.close();
int expectedNumRequests = 6 / batchElementCount;
- ArgumentCaptor throttledTime = ArgumentCaptor.forClass(Long.class);
- verify(statsRecorderWrapper, timeout(1000).times(expectedNumRequests))
- .putClientBlockingLatencies(throttledTime.capture());
- // After the first request is sent, batcher will block on add because of the server latency.
- // Blocking latency should be around server latency.
- assertThat(throttledTime.getAllValues().get(1)).isAtLeast(SERVER_LATENCY - 10);
- assertThat(throttledTime.getAllValues().get(2)).isAtLeast(SERVER_LATENCY - 10);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData applicationLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME);
- verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.MutateRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER);
+ long value = getAggregatedValue(applicationLatency, expectedAttributes);
+ // After the first request is sent, batcher will block on add because of the server latency.
+ // Blocking latency should be around server latency. So each data point would be at least
+ // (SERVER_LATENCY - 10).
+ long expected = (SERVER_LATENCY - 10) * (expectedNumRequests - 1) / expectedNumRequests;
+ assertThat(value).isAtLeast(expected);
}
}
@Test
- public void testQueuedOnChannelServerStreamLatencies() throws InterruptedException {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
- stub.readRowsCallable().all().call(Query.create(TABLE_ID));
-
- ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class);
-
- verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get()))
- .putClientBlockingLatencies(blockedTime.capture());
+ public void testQueuedOnChannelServerStreamLatencies() {
+ stub.readRowsCallable().all().call(Query.create(TABLE));
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME);
+
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY);
+ long value = getAggregatedValue(clientLatency, attributes);
+ assertThat(value).isAtLeast(CHANNEL_BLOCKING_LATENCY);
}
@Test
- public void testQueuedOnChannelUnaryLatencies() throws InterruptedException {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper));
- stub.mutateRowCallable().call(RowMutation.create(TABLE_ID, "a-key").setCell("f", "q", "v"));
+ public void testQueuedOnChannelUnaryLatencies() {
- ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class);
+ stub.mutateRowCallable().call(RowMutation.create(TABLE, "a-key").setCell("f", "q", "v"));
- verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get()))
- .putClientBlockingLatencies(blockedTime.capture());
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME);
- assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY);
- assertThat(blockedTime.getAllValues().get(2)).isAtLeast(CHANNEL_BLOCKING_LATENCY);
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+
+ long expected = CHANNEL_BLOCKING_LATENCY * 2 / 3;
+ long actual = getAggregatedValue(clientLatency, attributes);
+ assertThat(actual).isAtLeast(expected);
}
@Test
public void testPermanentFailure() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
try {
Lists.newArrayList(stub.readRowsCallable().call(Query.create(BAD_TABLE_ID)).iterator());
Assert.fail("Request should throw not found error");
} catch (NotFoundException e) {
}
- ArgumentCaptor attemptLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData attemptLatency = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "NOT_FOUND")
+ .put(TABLE_ID_KEY, BAD_TABLE_ID)
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(ZONE_ID_KEY, "global")
+ .put(STREAMING_KEY, true)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- verify(statsRecorderWrapper, timeout(50)).putAttemptLatencies(attemptLatency.capture());
- verify(statsRecorderWrapper, timeout(50)).putOperationLatencies(operationLatency.capture());
- verify(statsRecorderWrapper, timeout(50))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ verifyAttributes(attemptLatency, expected);
- assertThat(status.getValue()).isEqualTo("NOT_FOUND");
- assertThat(tableId.getValue()).isEqualTo(BAD_TABLE_ID);
- assertThat(cluster.getValue()).isEqualTo("unspecified");
- assertThat(zone.getValue()).isEqualTo("global");
+ MetricData opLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ verifyAttributes(opLatency, expected);
}
private static class FakeService extends BigtableGrpc.BigtableImplBase {
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java
index a6670182b8..4ab19a5337 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java
@@ -23,17 +23,29 @@
import com.google.api.gax.grpc.ChannelPoolSettings;
import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider;
import com.google.bigtable.v2.*;
+import com.google.cloud.bigtable.Version;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.FakeServiceBuilder;
import com.google.cloud.bigtable.data.v2.models.*;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection;
import io.grpc.Server;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.grpc.stub.StreamObserver;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.View;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;
import org.junit.After;
import org.junit.Before;
@@ -51,25 +63,50 @@ public class ErrorCountPerConnectionTest {
private final FakeService fakeService = new FakeService();
private EnhancedBigtableStubSettings.Builder builder;
private ArgumentCaptor runnableCaptor;
- private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection;
+
+ private InMemoryMetricReader metricReader;
+
+ private Attributes attributes;
@Before
public void setup() throws Exception {
server = FakeServiceBuilder.create(fakeService).start();
ScheduledExecutorService executors = Mockito.mock(ScheduledExecutorService.class);
+
+ attributes =
+ Attributes.builder()
+ .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, "fake-project")
+ .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, "fake-instance")
+ .put(BuiltinMetricsConstants.APP_PROFILE_KEY, "")
+ .put(BuiltinMetricsConstants.CLIENT_NAME_KEY, "bigtable-java/" + Version.VERSION)
+ .build();
+
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+
+ for (Map.Entry entry :
+ BuiltinMetricsConstants.getAllViews().entrySet()) {
+ meterProvider.registerView(entry.getKey(), entry.getValue());
+ }
+
+ OpenTelemetrySdk otel =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
builder =
BigtableDataSettings.newBuilderForEmulator(server.getPort())
.stubSettings()
.setBackgroundExecutorProvider(FixedExecutorProvider.create(executors))
.setProjectId("fake-project")
- .setInstanceId("fake-instance");
+ .setInstanceId("fake-instance")
+ .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(otel));
+
runnableCaptor = ArgumentCaptor.forClass(Runnable.class);
Mockito.when(
executors.scheduleAtFixedRate(runnableCaptor.capture(), anyLong(), anyLong(), any()))
.thenReturn(null);
-
- statsRecorderWrapperForConnection = Mockito.mock(StatsRecorderWrapperForConnection.class);
}
@After
@@ -98,14 +135,21 @@ public void readWithOneChannel() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
+
runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
+
+ Collection allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
+
+ // Make sure the correct bucket is updated with the correct number of data points
+ ArrayList histogramPointData =
+ new ArrayList<>(metricData.getHistogramData().getPoints());
+ assertThat(histogramPointData.size()).isEqualTo(1);
+ HistogramPointData point = histogramPointData.get(0);
+ int index = findDataPointIndex(point.getBoundaries(), errorCount);
+ assertThat(point.getCounts().get(index)).isEqualTo(1);
}
@Test
@@ -131,28 +175,35 @@ public void readWithTwoChannels() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(2);
- // Requests get assigned to channels using a Round Robin algorithm, so half to each.
- assertThat(allErrorCounts).containsExactly(totalErrorCount / 2, totalErrorCount / 2);
+ long errorCountPerChannel = totalErrorCount / 2;
+
+ Collection allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
+
+ // The 2 channels should get equal amount of errors, so the totalErrorCount / 2 bucket is
+ // updated twice.
+ ArrayList histogramPointData =
+ new ArrayList<>(metricData.getHistogramData().getPoints());
+ assertThat(histogramPointData.size()).isEqualTo(1);
+ HistogramPointData point = histogramPointData.get(0);
+ int index = findDataPointIndex(point.getBoundaries(), errorCountPerChannel);
+ assertThat(point.getCounts().get(index)).isEqualTo(2);
}
@Test
public void readOverTwoPeriods() throws Exception {
EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build());
- long errorCount = 0;
+ long errorCount1 = 0;
for (int i = 0; i < 20; i++) {
Query query;
if (i % 3 == 0) {
query = Query.create(ERROR_TABLE_NAME);
- errorCount += 1;
+ errorCount1 += 1;
} else {
query = Query.create(SUCCESS_TABLE_NAME);
}
@@ -162,16 +213,9 @@ public void readOverTwoPeriods() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
- runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
- errorCount = 0;
+ runInterceptorTasksAndAssertCount();
+ long errorCount2 = 0;
for (int i = 0; i < 20; i++) {
Query query;
@@ -179,7 +223,7 @@ public void readOverTwoPeriods() throws Exception {
query = Query.create(SUCCESS_TABLE_NAME);
} else {
query = Query.create(ERROR_TABLE_NAME);
- errorCount += 1;
+ errorCount2 += 1;
}
try {
stub.readRowsCallable().call(query).iterator().hasNext();
@@ -187,27 +231,22 @@ public void readOverTwoPeriods() throws Exception {
// noop
}
}
- errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
+
runInterceptorTasksAndAssertCount();
- allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
- }
- @Test
- public void ignoreInactiveConnection() throws Exception {
- EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build());
+ Collection allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
- runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts).isEmpty();
+ ArrayList histogramPointData =
+ new ArrayList<>(metricData.getHistogramData().getPoints());
+ assertThat(histogramPointData.size()).isEqualTo(1);
+ HistogramPointData point = histogramPointData.get(0);
+ int index1 = findDataPointIndex(point.getBoundaries(), errorCount1);
+ int index2 = findDataPointIndex(point.getBoundaries(), errorCount2);
+ assertThat(point.getCounts().get(index1)).isEqualTo(1);
+ assertThat(point.getCounts().get(index2)).isEqualTo(1);
}
@Test
@@ -221,22 +260,19 @@ public void noFailedRequests() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(0);
+ Collection allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
+ long value = BuiltinMetricsTestUtils.getAggregatedValue(metricData, attributes);
+ assertThat(value).isEqualTo(0);
}
private void runInterceptorTasksAndAssertCount() {
int actualNumOfTasks = 0;
for (Runnable runnable : runnableCaptor.getAllValues()) {
if (runnable instanceof ErrorCountPerConnectionMetricTracker) {
- ((ErrorCountPerConnectionMetricTracker) runnable)
- .setStatsRecorderWrapperForConnection(statsRecorderWrapperForConnection);
runnable.run();
actualNumOfTasks++;
}
@@ -244,6 +280,16 @@ private void runInterceptorTasksAndAssertCount() {
assertThat(actualNumOfTasks).isEqualTo(1);
}
+ private int findDataPointIndex(List boundaries, long dataPoint) {
+ int index = 0;
+ for (; index < boundaries.size(); index++) {
+ if (boundaries.get(index) >= dataPoint) {
+ break;
+ }
+ }
+ return index;
+ }
+
static class FakeService extends BigtableGrpc.BigtableImplBase {
@Override
public void readRows(
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
index 15bd9171f0..d72eac4056 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
@@ -39,7 +39,6 @@
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
-import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableMap;
@@ -120,15 +119,20 @@ public void setUp() throws Exception {
.setInstanceId(INSTANCE_ID)
.setAppProfileId(APP_PROFILE_ID)
.build();
- EnhancedBigtableStubSettings stubSettings =
- settings
- .getStubSettings()
+
+ ClientContext clientContext =
+ EnhancedBigtableStub.createClientContext(settings.getStubSettings());
+ clientContext =
+ clientContext
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
- settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder()))
+ settings.getStubSettings(),
+ Tags.getTagger(),
+ localStats.getStatsRecorder(),
+ null))
.build();
- stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings));
+ stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext);
}
@After
diff --git a/grpc-google-cloud-bigtable-admin-v2/pom.xml b/grpc-google-cloud-bigtable-admin-v2/pom.xml
index e7d1f4076b..1ab145fce8 100644
--- a/grpc-google-cloud-bigtable-admin-v2/pom.xml
+++ b/grpc-google-cloud-bigtable-admin-v2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
grpc-google-cloud-bigtable-admin-v2
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
grpc-google-cloud-bigtable-admin-v2
GRPC library for grpc-google-cloud-bigtable-admin-v2
com.google.cloud
google-cloud-bigtable-parent
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
@@ -18,14 +18,14 @@
com.google.cloud
google-cloud-bigtable-deps-bom
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
pom
import
com.google.cloud
google-cloud-bigtable-bom
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
pom
import
diff --git a/grpc-google-cloud-bigtable-v2/pom.xml b/grpc-google-cloud-bigtable-v2/pom.xml
index 983bfea873..f0aadd5760 100644
--- a/grpc-google-cloud-bigtable-v2/pom.xml
+++ b/grpc-google-cloud-bigtable-v2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
grpc-google-cloud-bigtable-v2
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
grpc-google-cloud-bigtable-v2
GRPC library for grpc-google-cloud-bigtable-v2
com.google.cloud
google-cloud-bigtable-parent
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
@@ -18,14 +18,14 @@
com.google.cloud
google-cloud-bigtable-deps-bom
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
pom
import
com.google.cloud
google-cloud-bigtable-bom
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
pom
import
diff --git a/pom.xml b/pom.xml
index f193e7e852..6406ff41e1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -4,7 +4,7 @@
google-cloud-bigtable-parent
pom
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
Google Cloud Bigtable Parent
https://github.com/googleapis/java-bigtable
@@ -14,7 +14,7 @@
com.google.cloud
sdk-platform-java-config
- 3.28.1
+ 3.29.0
@@ -153,27 +153,27 @@
com.google.api.grpc
proto-google-cloud-bigtable-v2
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
com.google.api.grpc
proto-google-cloud-bigtable-admin-v2
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
com.google.api.grpc
grpc-google-cloud-bigtable-v2
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
com.google.api.grpc
grpc-google-cloud-bigtable-admin-v2
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
com.google.cloud
google-cloud-bigtable
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
@@ -347,22 +347,6 @@
-
-
-
- with-shaded
-
-
- !skip-shaded
-
-
-
- google-cloud-bigtable-stats
-
-
diff --git a/proto-google-cloud-bigtable-admin-v2/pom.xml b/proto-google-cloud-bigtable-admin-v2/pom.xml
index ee13e371c9..7f05a7c112 100644
--- a/proto-google-cloud-bigtable-admin-v2/pom.xml
+++ b/proto-google-cloud-bigtable-admin-v2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
proto-google-cloud-bigtable-admin-v2
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
proto-google-cloud-bigtable-admin-v2
PROTO library for proto-google-cloud-bigtable-admin-v2
com.google.cloud
google-cloud-bigtable-parent
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
@@ -18,14 +18,14 @@
com.google.cloud
google-cloud-bigtable-deps-bom
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
pom
import
com.google.cloud
google-cloud-bigtable-bom
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
pom
import
diff --git a/proto-google-cloud-bigtable-v2/pom.xml b/proto-google-cloud-bigtable-v2/pom.xml
index 2b0787b6d8..8f2db9b76d 100644
--- a/proto-google-cloud-bigtable-v2/pom.xml
+++ b/proto-google-cloud-bigtable-v2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
proto-google-cloud-bigtable-v2
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
proto-google-cloud-bigtable-v2
PROTO library for proto-google-cloud-bigtable-v2
com.google.cloud
google-cloud-bigtable-parent
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
@@ -18,14 +18,14 @@
com.google.cloud
google-cloud-bigtable-deps-bom
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
pom
import
com.google.cloud
google-cloud-bigtable-bom
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
pom
import
diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml
index 48f9dd3756..473ddf3d26 100644
--- a/samples/install-without-bom/pom.xml
+++ b/samples/install-without-bom/pom.xml
@@ -25,13 +25,15 @@
+
com.google.cloud
google-cloud-bigtable
- 2.36.0
+ 2.38.0
+
junit
diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml
index 07ee02c1e5..64261c0efb 100644
--- a/samples/snapshot/pom.xml
+++ b/samples/snapshot/pom.xml
@@ -28,7 +28,7 @@
com.google.cloud
google-cloud-bigtable
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml
index 5a040c9b8c..807f1cecd9 100644
--- a/samples/snippets/pom.xml
+++ b/samples/snippets/pom.xml
@@ -23,14 +23,13 @@
UTF-8
-
com.google.cloud
libraries-bom
- 26.25.0
+ 26.37.0
pom
import
diff --git a/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java b/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java
new file mode 100644
index 0000000000..8f3047442b
--- /dev/null
+++ b/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2024 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.bigtable;
+
+import static com.google.cloud.bigtable.data.v2.models.Filters.FILTERS;
+
+import com.google.api.gax.rpc.NotFoundException;
+import com.google.api.gax.rpc.PermissionDeniedException;
+import com.google.api.gax.rpc.ServerStream;
+import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient;
+import com.google.cloud.bigtable.admin.v2.BigtableTableAdminSettings;
+import com.google.cloud.bigtable.admin.v2.models.AuthorizedView;
+import com.google.cloud.bigtable.admin.v2.models.CreateAuthorizedViewRequest;
+import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest;
+import com.google.cloud.bigtable.admin.v2.models.FamilySubsets;
+import com.google.cloud.bigtable.admin.v2.models.SubsetView;
+import com.google.cloud.bigtable.admin.v2.models.Table;
+import com.google.cloud.bigtable.admin.v2.models.UpdateAuthorizedViewRequest;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
+import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId;
+import com.google.cloud.bigtable.data.v2.models.Filters.Filter;
+import com.google.cloud.bigtable.data.v2.models.Query;
+import com.google.cloud.bigtable.data.v2.models.Row;
+import com.google.cloud.bigtable.data.v2.models.RowCell;
+import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.protobuf.ByteString;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class AuthorizedViewExample {
+
+ private static final String COLUMN_FAMILY = "cf";
+ private static final String COLUMN_QUALIFIER_GREETING = "greeting";
+ private static final String COLUMN_QUALIFIER_NAME = "name";
+ private static final String ROW_KEY_PREFIX = "rowKey";
+ private final String tableId;
+ private final String authorizedViewId;
+ private final BigtableTableAdminClient adminClient;
+ private final BigtableDataClient dataClient;
+
+ public static void main(String[] args) throws IOException {
+
+ if (args.length != 2) {
+ System.out.println("Missing required project id or instance id");
+ return;
+ }
+ String projectId = args[0];
+ String instanceId = args[1];
+
+ AuthorizedViewExample authorizedViewExample =
+ new AuthorizedViewExample(projectId, instanceId, "test-table", "test-authorized-view");
+ authorizedViewExample.run();
+ }
+
+ public AuthorizedViewExample(
+ String projectId, String instanceId, String tableId, String authorizedViewId)
+ throws IOException {
+ this.tableId = tableId;
+ this.authorizedViewId = authorizedViewId;
+
+ // Creates the settings to configure a bigtable data client.
+ BigtableDataSettings settings =
+ BigtableDataSettings.newBuilder().setProjectId(projectId).setInstanceId(instanceId).build();
+
+ // Creates a bigtable data client.
+ dataClient = BigtableDataClient.create(settings);
+
+ // Creates the settings to configure a bigtable table admin client.
+ BigtableTableAdminSettings adminSettings =
+ BigtableTableAdminSettings.newBuilder()
+ .setProjectId(projectId)
+ .setInstanceId(instanceId)
+ .build();
+
+ // Creates a bigtable table admin client.
+ adminClient = BigtableTableAdminClient.create(adminSettings);
+ }
+
+ public void close() {
+ dataClient.close();
+ adminClient.close();
+ }
+
+ public void run() {
+ createTable();
+ createAuthorizedView();
+ updateAuthorizedView();
+ getAuthorizedView();
+ listAllAuthorizedViews();
+ writeToAuthorizedView();
+ readSingleRowFromAuthorizedView();
+ readRowsWithFilterFromAuthorizedView();
+ deleteAuthorizedView();
+ deleteTable();
+ close();
+ }
+
+ public void createTable() {
+ // Checks if table exists, creates table if it does not exist.
+ if (!adminClient.exists(tableId)) {
+ System.out.println("Table does not exist, creating table: " + tableId);
+ CreateTableRequest createTableRequest =
+ CreateTableRequest.of(tableId).addFamily(COLUMN_FAMILY);
+ Table table = adminClient.createTable(createTableRequest);
+ System.out.printf("Table: %s created successfully%n", table.getId());
+ }
+ }
+
+ public void deleteTable() {
+ // Deletes the entire table.
+ System.out.println("\nDelete table: " + tableId);
+ try {
+ adminClient.deleteTable(tableId);
+ System.out.printf("Table: %s deleted successfully%n", tableId);
+ } catch (NotFoundException e) {
+ System.err.println("Failed to delete a non-existent table: " + e.getMessage());
+ }
+ }
+
+ /**
+ * Demonstrates how to create an authorized view under a table with the specified configuration.
+ */
+ public void createAuthorizedView() {
+ // Checks if the authorized view exists, creates it if it does not exist.
+ try {
+ adminClient.getAuthorizedView(tableId, authorizedViewId);
+ } catch (NotFoundException exception) {
+ System.out.printf("%nCreating authorized view %s in table %s%n", authorizedViewId, tableId);
+ // [START bigtable_create_authorized_view]
+ try {
+ CreateAuthorizedViewRequest request =
+ CreateAuthorizedViewRequest.of(tableId, authorizedViewId)
+ .setAuthorizedViewType(
+ SubsetView.create()
+ .addRowPrefix("")
+ .setFamilySubsets(
+ COLUMN_FAMILY,
+ FamilySubsets.create().addQualifierPrefix(COLUMN_QUALIFIER_NAME)));
+ AuthorizedView authorizedView = adminClient.createAuthorizedView(request);
+ System.out.printf("AuthorizedView: %s created successfully%n", authorizedView.getId());
+ } catch (NotFoundException e) {
+ System.err.println(
+ "Failed to create an authorized view from a non-existent table: " + e.getMessage());
+ }
+ // [END bigtable_create_authorized_view]
+ }
+ }
+
+ /** Demonstrates how to modify an authorized view. */
+ public void updateAuthorizedView() {
+ System.out.printf("%nUpdating authorized view %s in table %s%n", authorizedViewId, tableId);
+ // [START bigtable_update_authorized_view]
+ try {
+ // Update to an authorized view permitting everything.
+ UpdateAuthorizedViewRequest request =
+ UpdateAuthorizedViewRequest.of(tableId, authorizedViewId)
+ .setAuthorizedViewType(
+ SubsetView.create()
+ .addRowPrefix("")
+ .setFamilySubsets(
+ COLUMN_FAMILY, FamilySubsets.create().addQualifierPrefix("")));
+ AuthorizedView authorizedView = adminClient.updateAuthorizedView(request);
+ System.out.printf("AuthorizedView: %s updated successfully%n", authorizedView.getId());
+ } catch (NotFoundException e) {
+ System.err.println("Failed to modify a non-existent authorized view: " + e.getMessage());
+ }
+ // [END bigtable_update_authorized_view]
+ }
+
+ /** Demonstrates how to get an authorized view's metadata. */
+ public AuthorizedView getAuthorizedView() {
+ System.out.printf("%nGetting authorized view %s in table %s%n", authorizedViewId, tableId);
+ // [START bigtable_get_authorized_view]
+ AuthorizedView authorizedView = null;
+ try {
+ authorizedView = adminClient.getAuthorizedView(tableId, authorizedViewId);
+ SubsetView subsetView = (SubsetView) authorizedView.getAuthorizedViewType();
+
+ for (ByteString rowPrefix : subsetView.getRowPrefixes()) {
+ System.out.printf("Row Prefix: %s%n", rowPrefix.toStringUtf8());
+ }
+ for (Map.Entry entry : subsetView.getFamilySubsets().entrySet()) {
+ for (ByteString qualifierPrefix : entry.getValue().getQualifierPrefixes()) {
+ System.out.printf(
+ "Column Family: %s, Qualifier Prefix: %s%n",
+ entry.getKey(), qualifierPrefix.toStringUtf8());
+ }
+ for (ByteString qualifier : entry.getValue().getQualifiers()) {
+ System.out.printf(
+ "Column Family: %s, Qualifier: %s%n", entry.getKey(), qualifier.toStringUtf8());
+ }
+ }
+ } catch (NotFoundException e) {
+ System.err.println(
+ "Failed to retrieve metadata from a non-existent authorized view: " + e.getMessage());
+ }
+ // [END bigtable_get_authorized_view]
+ return authorizedView;
+ }
+
+ /** Demonstrates how to list all authorized views within a table. */
+ public List listAllAuthorizedViews() {
+ System.out.printf("%nListing authorized views in table %s%n", tableId);
+ // [START bigtable_list_authorized_views]
+ List authorizedViewIds = new ArrayList<>();
+ try {
+ authorizedViewIds = adminClient.listAuthorizedViews(tableId);
+ for (String authorizedViewId : authorizedViewIds) {
+ System.out.println(authorizedViewId);
+ }
+ } catch (NotFoundException e) {
+ System.err.println(
+ "Failed to list authorized views from a non-existent table: " + e.getMessage());
+ }
+ // [END bigtable_list_authorized_views]
+ return authorizedViewIds;
+ }
+
+ /** Demonstrates how to delete an authorized view. */
+ public void deleteAuthorizedView() {
+ System.out.printf("%nDeleting authorized view %s in table %s%n", authorizedViewId, tableId);
+ // [START bigtable_delete_authorized_view]
+ try {
+ adminClient.deleteAuthorizedView(tableId, authorizedViewId);
+ System.out.printf("AuthorizedView: %s deleted successfully%n", authorizedViewId);
+ } catch (NotFoundException e) {
+ System.err.println("Failed to delete a non-existent authorized view: " + e.getMessage());
+ }
+ // [END bigtable_delete_authorized_view]
+ }
+
+ /** Demonstrates how to write some rows to an authorized view. */
+ public void writeToAuthorizedView() {
+ // [START bigtable_authorized_view_write_rows]
+ try {
+ System.out.println("\nWriting to authorized view");
+ String[] names = {"World", "Bigtable", "Java"};
+ for (int i = 0; i < names.length; i++) {
+ String greeting = "Hello " + names[i] + "!";
+ RowMutation rowMutation =
+ RowMutation.create(AuthorizedViewId.of(tableId, authorizedViewId), ROW_KEY_PREFIX + i)
+ .setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_NAME, names[i])
+ .setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_GREETING, greeting);
+ dataClient.mutateRow(rowMutation);
+ System.out.println(greeting);
+ }
+ } catch (Exception e) {
+ if (e instanceof NotFoundException) {
+ System.err.println("Failed to write to non-existent authorized view: " + e.getMessage());
+ } else if (e instanceof PermissionDeniedException) {
+ System.err.println(
+ "Failed to apply mutations outside of the authorized view: " + e.getMessage());
+ }
+ }
+ // [END bigtable_authorized_view_write_rows]
+ }
+
+ /** Demonstrates how to read a single row from an authorized view. */
+ public Row readSingleRowFromAuthorizedView() {
+ // [START bigtable_authorized_view_get_by_key]
+ try {
+ System.out.println("\nReading a single row by row key from an authorized view");
+ Row row =
+ dataClient.readRow(AuthorizedViewId.of(tableId, authorizedViewId), ROW_KEY_PREFIX + 0);
+ System.out.println("Row: " + row.getKey().toStringUtf8());
+ for (RowCell cell : row.getCells()) {
+ System.out.printf(
+ "Family: %s Qualifier: %s Value: %s%n",
+ cell.getFamily(), cell.getQualifier().toStringUtf8(), cell.getValue().toStringUtf8());
+ }
+ return row;
+ } catch (NotFoundException e) {
+ System.err.println("Failed to read from a non-existent authorized view: " + e.getMessage());
+ return null;
+ }
+ // [END bigtable_authorized_view_get_by_key]
+ }
+
+ /** Demonstrates how to read rows from an authorized view with a filter. */
+ public List readRowsWithFilterFromAuthorizedView() {
+ // [START bigtable_authorized_view_scan_with_filter]
+ try {
+ // A filter that matches only the most recent cell within each column
+ Filter filter = FILTERS.limit().cellsPerColumn(1);
+ System.out.println("\nScanning authorized view with filter");
+ Query query = Query.create(AuthorizedViewId.of(tableId, authorizedViewId)).filter(filter);
+ ServerStream rowStream = dataClient.readRows(query);
+ List authorizedViewRows = new ArrayList<>();
+ for (Row r : rowStream) {
+ System.out.println("Row Key: " + r.getKey().toStringUtf8());
+ authorizedViewRows.add(r);
+ for (RowCell cell : r.getCells()) {
+ System.out.printf(
+ "Family: %s Qualifier: %s Value: %s%n",
+ cell.getFamily(), cell.getQualifier().toStringUtf8(), cell.getValue().toStringUtf8());
+ }
+ }
+ return authorizedViewRows;
+ } catch (NotFoundException e) {
+ System.err.println("Failed to read a non-existent authorized view: " + e.getMessage());
+ return null;
+ }
+ // [END bigtable_authorized_view_scan_with_filter]
+ }
+}
diff --git a/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java b/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java
index d1f546c342..99bc25735d 100644
--- a/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java
+++ b/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java
@@ -156,7 +156,7 @@ public Row readSingleRow() {
// [START bigtable_hw_get_by_key]
try {
System.out.println("\nReading a single row by row key");
- Row row = dataClient.readRow(tableId, ROW_KEY_PREFIX + 0);
+ Row row = dataClient.readRow(TableId.of(tableId), ROW_KEY_PREFIX + 0);
System.out.println("Row: " + row.getKey().toStringUtf8());
for (RowCell cell : row.getCells()) {
System.out.printf(
diff --git a/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java b/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java
index 0bdae948d2..df813ace39 100644
--- a/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java
+++ b/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java
@@ -87,6 +87,11 @@ public void run() {
addCluster();
deleteCluster();
deleteInstance();
+ close();
+ }
+
+ // Close the client
+ void close() {
adminClient.close();
}
diff --git a/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java b/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java
index 9842658a82..5f804153a1 100644
--- a/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java
+++ b/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java
@@ -108,6 +108,11 @@ public void run() {
printModifiedColumnFamily();
deleteColumnFamily();
deleteTable();
+ close();
+ }
+
+ // Close the client
+ void close() {
adminClient.close();
}
diff --git a/samples/snippets/src/test/java/com/example/bigtable/AuthorizedViewExampleTest.java b/samples/snippets/src/test/java/com/example/bigtable/AuthorizedViewExampleTest.java
new file mode 100644
index 0000000000..5990d66107
--- /dev/null
+++ b/samples/snippets/src/test/java/com/example/bigtable/AuthorizedViewExampleTest.java
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2024 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.bigtable;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+
+import com.google.api.gax.rpc.NotFoundException;
+import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient;
+import com.google.cloud.bigtable.admin.v2.BigtableTableAdminSettings;
+import com.google.cloud.bigtable.admin.v2.models.AuthorizedView;
+import com.google.cloud.bigtable.admin.v2.models.CreateAuthorizedViewRequest;
+import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest;
+import com.google.cloud.bigtable.admin.v2.models.FamilySubsets;
+import com.google.cloud.bigtable.admin.v2.models.SubsetView;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
+import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId;
+import com.google.cloud.bigtable.data.v2.models.Row;
+import com.google.cloud.bigtable.data.v2.models.RowCell;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class AuthorizedViewExampleTest extends BigtableBaseTest {
+
+ private static final String TABLE_PREFIX = "table";
+ private static final String AUTHORIZED_VIEW_PREFIX = "authorized-view";
+ private static final String COLUMN_FAMILY = "cf";
+ private String tableId;
+ private String authorizedViewId;
+ private static BigtableDataClient dataClient;
+ private static BigtableTableAdminClient adminClient;
+ private AuthorizedViewExample authorizedViewExample;
+
+ @BeforeClass
+ public static void beforeClass() throws IOException {
+ initializeVariables();
+ BigtableDataSettings settings =
+ BigtableDataSettings.newBuilder().setProjectId(projectId).setInstanceId(instanceId).build();
+ dataClient = BigtableDataClient.create(settings);
+ BigtableTableAdminSettings adminSettings =
+ BigtableTableAdminSettings.newBuilder()
+ .setProjectId(projectId)
+ .setInstanceId(instanceId)
+ .build();
+ adminClient = BigtableTableAdminClient.create(adminSettings);
+ }
+
+ @AfterClass
+ public static void afterClass() {
+ garbageCollect();
+ dataClient.close();
+ adminClient.close();
+ }
+
+ @Before
+ public void setup() throws IOException {
+ tableId = generateResourceId(TABLE_PREFIX);
+ authorizedViewId = generateResourceId(AUTHORIZED_VIEW_PREFIX);
+ authorizedViewExample =
+ new AuthorizedViewExample(projectId, instanceId, tableId, authorizedViewId);
+ adminClient.createTable(CreateTableRequest.of(tableId).addFamily(COLUMN_FAMILY));
+ adminClient.createAuthorizedView(
+ CreateAuthorizedViewRequest.of(tableId, authorizedViewId)
+ .setAuthorizedViewType(
+ SubsetView.create()
+ .addRowPrefix("")
+ .setFamilySubsets(
+ COLUMN_FAMILY, FamilySubsets.create().addQualifierPrefix(""))));
+ }
+
+ @After
+ public void after() {
+ if (adminClient.exists(tableId)) {
+ // Deleting a table also deletes all the authorized views inside it.
+ adminClient.deleteTable(tableId);
+ }
+ authorizedViewExample.close();
+ }
+
+ @Test
+ public void testRunDoesNotFail() {
+ authorizedViewExample.run();
+ }
+
+ @Test
+ public void testAuthorizedViewCreateUpdateDelete() throws IOException {
+ // Creates an authorized view.
+ String testAuthorizedViewId = generateResourceId(AUTHORIZED_VIEW_PREFIX);
+ AuthorizedViewExample testAuthorizedViewExample =
+ new AuthorizedViewExample(projectId, instanceId, tableId, testAuthorizedViewId);
+ testAuthorizedViewExample.createAuthorizedView();
+ AuthorizedView authorizedView = adminClient.getAuthorizedView(tableId, testAuthorizedViewId);
+ assertEquals(authorizedView.getId(), testAuthorizedViewId);
+
+ // Updates the authorized view.
+ testAuthorizedViewExample.updateAuthorizedView();
+ AuthorizedView updatedAuthorizedView =
+ adminClient.getAuthorizedView(tableId, testAuthorizedViewId);
+ assertNotEquals(authorizedView, updatedAuthorizedView);
+
+ // Deletes the authorized view.
+ testAuthorizedViewExample.deleteAuthorizedView();
+ assertThrows(
+ NotFoundException.class,
+ () -> adminClient.getAuthorizedView(tableId, testAuthorizedViewId));
+
+ testAuthorizedViewExample.close();
+ }
+
+ @Test
+ public void testGetAuthorizedView() {
+ AuthorizedView authorizedView = authorizedViewExample.getAuthorizedView();
+ assertNotNull(authorizedView);
+ assertEquals(authorizedView.getId(), authorizedViewId);
+ }
+
+ @Test
+ public void testListAuthorizedView() {
+ List authorizedViewIds = authorizedViewExample.listAllAuthorizedViews();
+ assertEquals(authorizedViewIds.size(), 1);
+ assertEquals(authorizedViewIds.get(0), authorizedViewId);
+ }
+
+ @Test
+ public void testWriteToAuthorizedView() {
+ assertNull(dataClient.readRow(AuthorizedViewId.of(tableId, authorizedViewId), "rowKey0"));
+ authorizedViewExample.writeToAuthorizedView();
+ assertNotNull(dataClient.readRow(AuthorizedViewId.of(tableId, authorizedViewId), "rowKey0"));
+ }
+
+ @Test
+ public void testReadsFromAuthorizedView() {
+ authorizedViewExample.writeToAuthorizedView();
+
+ Row actualRow = authorizedViewExample.readSingleRowFromAuthorizedView();
+ assertEquals("rowKey0", actualRow.getKey().toStringUtf8());
+ assertEquals(2, actualRow.getCells().size());
+ assertEquals("Hello World!", actualRow.getCells().get(0).getValue().toStringUtf8());
+ assertEquals("World", actualRow.getCells().get(1).getValue().toStringUtf8());
+
+ List rows = authorizedViewExample.readRowsWithFilterFromAuthorizedView();
+ List printedRows = new ArrayList<>();
+ for (Row row : rows) {
+ for (RowCell cell : row.getCells()) {
+ printedRows.add(
+ String.format(
+ "%s_%s_%s:%s",
+ row.getKey().toStringUtf8(),
+ cell.getFamily(),
+ cell.getQualifier().toStringUtf8(),
+ cell.getValue().toStringUtf8()));
+ }
+ }
+ String[] expectedRows =
+ new String[] {
+ "rowKey0_cf_greeting:Hello World!",
+ "rowKey0_cf_name:World",
+ "rowKey1_cf_greeting:Hello Bigtable!",
+ "rowKey1_cf_name:Bigtable",
+ "rowKey2_cf_greeting:Hello Java!",
+ "rowKey2_cf_name:Java"
+ };
+ assertEquals(printedRows, Arrays.asList(expectedRows));
+ }
+
+ private static void garbageCollect() {
+ Pattern timestampPattern = Pattern.compile(TABLE_PREFIX + "-([0-9a-f]+)-([0-9a-f]+)");
+ for (String tableId : adminClient.listTables()) {
+ Matcher matcher = timestampPattern.matcher(tableId);
+ if (!matcher.matches()) {
+ continue;
+ }
+ String timestampStr = matcher.group(1);
+ long timestamp = Long.parseLong(timestampStr, 16);
+ if (System.currentTimeMillis() - timestamp < TimeUnit.MINUTES.toMillis(10)) {
+ continue;
+ }
+ System.out.println("\nGarbage collecting orphaned table: " + tableId);
+ adminClient.deleteTable(tableId);
+ }
+ }
+}
diff --git a/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java b/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java
index 8845c587ba..5a4475e898 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java
@@ -55,7 +55,7 @@ public void tearDown() {
bout.reset();
}
- public static String generateTableId(String prefix) {
+ public static String generateResourceId(String prefix) {
return prefix + "-" + UUID.randomUUID().toString().substring(0, 20);
}
diff --git a/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java b/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java
index dc66b2f9a2..15df1f8fa5 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java
@@ -78,6 +78,9 @@ public void after() {
if (adminClient.exists(instanceId)) {
adminClient.deleteInstance(instanceId);
}
+ if (instanceAdmin != null) {
+ instanceAdmin.close();
+ }
}
@Test
diff --git a/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java b/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java
index 1eb9dd0ae6..98182187a3 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java
@@ -29,7 +29,7 @@
public class MobileTimeSeriesBaseTest extends BigtableBaseTest {
- public static final String TABLE_ID = generateTableId("mobile-time-series");
+ public static final String TABLE_ID = generateResourceId("mobile-time-series");
public static final String COLUMN_FAMILY_NAME_STATS = "stats_summary";
public static final String COLUMN_FAMILY_NAME_PLAN = "cell_plan";
public static final Instant CURRENT_TIME = Instant.now();
diff --git a/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java b/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java
index 391764b549..d4fd4de304 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java
@@ -67,7 +67,7 @@ public static void afterClass() {
@Before
public void setup() throws IOException {
- tableId = generateTableId(TABLE_PREFIX);
+ tableId = generateResourceId(TABLE_PREFIX);
tableAdmin = new TableAdminExample(projectId, instanceId, tableId);
adminClient.createTable(CreateTableRequest.of(tableId).addFamily("cf"));
}
@@ -77,12 +77,15 @@ public void after() {
if (adminClient.exists(tableId)) {
adminClient.deleteTable(tableId);
}
+ if (tableAdmin != null) {
+ tableAdmin.close();
+ }
}
@Test
public void testCreateAndDeleteTable() throws IOException {
// Creates a table.
- String testTable = generateTableId(TABLE_PREFIX);
+ String testTable = generateResourceId(TABLE_PREFIX);
TableAdminExample testTableAdmin = new TableAdminExample(projectId, instanceId, testTable);
testTableAdmin.createTable();
assertTrue(adminClient.exists(testTable));
diff --git a/test-proxy/pom.xml b/test-proxy/pom.xml
index a7a0417369..d18e57f64b 100644
--- a/test-proxy/pom.xml
+++ b/test-proxy/pom.xml
@@ -12,11 +12,11 @@
google-cloud-bigtable-parent
com.google.cloud
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
- 2.37.1-SNAPSHOT
+ 2.38.1-SNAPSHOT
diff --git a/versions.txt b/versions.txt
index d5c1e3d879..a63166148c 100644
--- a/versions.txt
+++ b/versions.txt
@@ -1,10 +1,10 @@
# Format:
# module:released-version:current-version
-google-cloud-bigtable:2.37.0:2.37.1-SNAPSHOT
-grpc-google-cloud-bigtable-admin-v2:2.37.0:2.37.1-SNAPSHOT
-grpc-google-cloud-bigtable-v2:2.37.0:2.37.1-SNAPSHOT
-proto-google-cloud-bigtable-admin-v2:2.37.0:2.37.1-SNAPSHOT
-proto-google-cloud-bigtable-v2:2.37.0:2.37.1-SNAPSHOT
-google-cloud-bigtable-emulator:0.174.0:0.174.1-SNAPSHOT
-google-cloud-bigtable-emulator-core:0.174.0:0.174.1-SNAPSHOT
+google-cloud-bigtable:2.38.0:2.38.1-SNAPSHOT
+grpc-google-cloud-bigtable-admin-v2:2.38.0:2.38.1-SNAPSHOT
+grpc-google-cloud-bigtable-v2:2.38.0:2.38.1-SNAPSHOT
+proto-google-cloud-bigtable-admin-v2:2.38.0:2.38.1-SNAPSHOT
+proto-google-cloud-bigtable-v2:2.38.0:2.38.1-SNAPSHOT
+google-cloud-bigtable-emulator:0.175.0:0.175.1-SNAPSHOT
+google-cloud-bigtable-emulator-core:0.175.0:0.175.1-SNAPSHOT