diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/common/Type.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/common/Type.java
new file mode 100644
index 0000000000..df5c6dcd95
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/common/Type.java
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.common;
+
+import com.google.api.core.BetaApi;
+import com.google.api.core.InternalApi;
+import com.google.auto.value.AutoValue;
+import com.google.cloud.bigtable.data.v2.internal.ColumnToIndexMapper;
+import com.google.cloud.bigtable.data.v2.models.sql.SqlType;
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableList;
+import com.google.protobuf.ByteString;
+import java.util.List;
+import org.threeten.bp.Instant;
+
+/**
+ * Shared type implementations. Right now this is only used by SqlType but this will become a shared
+ * definition with Schema type (called {@link com.google.cloud.bigtable.admin.v2.models.Type} right
+ * now), and any other type interfaces needed in the future.
+ *
+ * <p>This is considered an internal implementation detail and not meant to be used by
+ * applications. Types should only be used through the relevant interfaces and factories, e.g.
+ * {@link SqlType}.
+ */
+@BetaApi
+@InternalApi
+public interface Type {
+
+ @AutoValue
+ abstract class Bytes implements Type, SqlType {
+
+ public static Bytes create() {
+ return DefaultInstances.BYTES;
+ }
+
+ @Override
+ public Code getCode() {
+ return Code.BYTES;
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name();
+ }
+ }
+
+ @AutoValue
+ abstract class String implements Type, SqlType {
+ public static String create() {
+ return DefaultInstances.STRING;
+ }
+
+ @Override
+ public Code getCode() {
+ return Code.STRING;
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name();
+ }
+ }
+
+ @AutoValue
+ abstract class Int64 implements Type, SqlType {
+ public static Int64 create() {
+ return DefaultInstances.INT64;
+ }
+
+ @Override
+ public Code getCode() {
+ return Code.INT64;
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name();
+ }
+ }
+
+ @AutoValue
+ abstract class Float64 implements Type, SqlType {
+ public static Float64 create() {
+ return DefaultInstances.FLOAT64;
+ }
+
+ @Override
+ public Code getCode() {
+ return Code.FLOAT64;
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name();
+ }
+ }
+
+ @AutoValue
+ abstract class Float32 implements Type, SqlType {
+ public static Float32 create() {
+ return DefaultInstances.FLOAT32;
+ }
+
+ @Override
+ public Code getCode() {
+ return Code.FLOAT32;
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name();
+ }
+ }
+
+ @AutoValue
+ abstract class Bool implements Type, SqlType {
+ public static Bool create() {
+ return DefaultInstances.BOOL;
+ }
+
+ @Override
+ public Code getCode() {
+ return Code.BOOL;
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name();
+ }
+ }
+
+ @AutoValue
+ abstract class Timestamp implements Type, SqlType {
+ public static Timestamp create() {
+ return DefaultInstances.TIMESTAMP;
+ }
+
+ @Override
+ public Code getCode() {
+ return Code.TIMESTAMP;
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name();
+ }
+ }
+
+ @AutoValue
+ abstract class Date implements Type, SqlType {
+ public static Date create() {
+ return DefaultInstances.DATE;
+ }
+
+ @Override
+ public Code getCode() {
+ return Code.DATE;
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name();
+ }
+ }
+
+ /**
+ * This is a special version of struct that is intended to only be used in the {@link
+ * com.google.cloud.bigtable.data.v2.models.sql.StructReader} getters that require types. We don't
+ * want users to need to specify the struct schema when the schema will be validated on calls to
+ * {@link com.google.cloud.bigtable.data.v2.models.sql.StructReader} methods on the struct.
+ *
+ * Any attempts to interact with the schema will throw an exception.
+ *
+ *
For example the historical map data type uses this as follows:
+ *
+ *
{@code
+ * Map> historicalMap =
+ * resultSet.getMap(
+ * "cf",
+ * SqlType.mapOf(SqlType.bytes(), SqlType.arrayOf(SqlType.struct())));
+ * Struct struct = historicalMap.get("column").get(0);
+ * // Struct schema will be validated here so there's no need for users to pass the schema to getMap above
+ * ByteString value = struct.getBytes("value");
+ * }
+ */
+ @AutoValue
+ abstract class SchemalessStruct implements Type, SqlType.Struct {
+ public static SchemalessStruct create() {
+ return DefaultInstances.SCHEMALESS_STRUCT;
+ }
+
+ @Override
+ public Code getCode() {
+ return Code.STRUCT;
+ }
+
+ @Override
+ public List getFields() {
+ throw new UnsupportedOperationException(
+ "Attempting to access schema of Schemaless Struct. These structs should only be used for typing of StructReader data access calls.");
+ }
+
+ @Override
+ public SqlType> getType(int fieldIndex) {
+ throw new UnsupportedOperationException(
+ "Attempting to access schema of Schemaless Struct. These structs should only be used for typing of StructReader data access calls.");
+ }
+
+ @Override
+ public SqlType> getType(java.lang.String fieldName) {
+ throw new UnsupportedOperationException(
+ "Attempting to access schema of Schemaless Struct. These structs should only be used for typing of StructReader data access calls.");
+ }
+
+ @Override
+ public int getColumnIndex(java.lang.String fieldName) {
+ throw new UnsupportedOperationException(
+ "Attempting to access schema of Schemaless Struct. These structs should only be used for typing of StructReader data access calls.");
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name();
+ }
+ }
+
+ /**
+ * Struct implementation that contains a schema that users can access. This should never be
+ * constructed by users. It is only intended to be created directly from Type protobufs.
+ */
+ class StructWithSchema extends ColumnToIndexMapper implements Type, SqlType.Struct {
+
+ private final List fields;
+
+ @InternalApi("Visible for testing")
+ public StructWithSchema(List fields) {
+ super(fields);
+ this.fields = fields;
+ }
+
+ @InternalApi("Visible for testing")
+ @AutoValue
+ public abstract static class Field implements SqlType.Struct.Field {
+ public static Field fromProto(com.google.bigtable.v2.Type.Struct.Field proto) {
+ return new AutoValue_Type_StructWithSchema_Field(
+ proto.getFieldName(), SqlType.fromProto(proto.getType()));
+ }
+
+ @Override
+ public abstract java.lang.String name();
+
+ @Override
+ public abstract SqlType> type();
+ }
+
+ public static StructWithSchema fromProto(com.google.bigtable.v2.Type.Struct proto) {
+ ImmutableList.Builder fields = ImmutableList.builder();
+ for (com.google.bigtable.v2.Type.Struct.Field protoField : proto.getFieldsList()) {
+ fields.add(Field.fromProto(protoField));
+ }
+ return new StructWithSchema(fields.build());
+ }
+
+ @Override
+ public Code getCode() {
+ return Code.STRUCT;
+ }
+
+ @Override
+ public List getFields() {
+ return fields;
+ }
+
+ @Override
+ public SqlType> getType(int fieldIndex) {
+ return fields.get(fieldIndex).type();
+ }
+
+ @Override
+ public SqlType> getType(java.lang.String fieldName) {
+ int index = getColumnIndex(fieldName);
+ return getType(index);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ StructWithSchema struct = (StructWithSchema) obj;
+ // Everything is derived from fields so that's all we need to compare;
+ return Objects.equal(getFields(), struct.getFields());
+ }
+
+ @Override
+ public int hashCode() {
+ // Everything is derived from fields so that's all we need;
+ return Objects.hashCode(fields);
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name() + "{fields=" + fields.toString() + "}";
+ }
+ }
+
+ @AutoValue
+ abstract class Array implements Type, SqlType.Array {
+ // Do we need non-sql type array elements? Might get messy
+ public static Type.Array create(SqlType elemType) {
+ return new AutoValue_Type_Array<>(elemType);
+ }
+
+ protected abstract SqlType elementType();
+
+ @Override
+ public Code getCode() {
+ return Code.ARRAY;
+ }
+
+ @Override
+ public SqlType getElementType() {
+ return elementType();
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name() + "{elementType=" + getElementType().getCode() + "}";
+ }
+ }
+
+ @AutoValue
+ abstract class Map implements Type, SqlType.Map {
+ // Same question as for array
+ public static Type.Map create(SqlType keyType, SqlType valueType) {
+ return new AutoValue_Type_Map<>(keyType, valueType);
+ }
+
+ protected abstract SqlType keyType();
+
+ protected abstract SqlType valueType();
+
+ @Override
+ public Code getCode() {
+ return Code.MAP;
+ }
+
+ @Override
+ public SqlType getKeyType() {
+ return keyType();
+ }
+
+ @Override
+ public SqlType getValueType() {
+ return valueType();
+ }
+
+ @Override
+ public java.lang.String toString() {
+ return getCode().name()
+ + "{keyType="
+ + getKeyType().toString()
+ + ", valueType="
+ + getValueType().toString()
+ + "}";
+ }
+ }
+
// Implementation detail to make singleton instances private without referencing the concrete
// autovalue generated class from the abstract base classes.
@InternalApi
class DefaultInstances {
  // Each type below is stateless, so one eagerly created instance per type is
  // shared by all callers of the corresponding create() factory.
  private static final Bytes BYTES = new AutoValue_Type_Bytes();
  private static final String STRING = new AutoValue_Type_String();
  private static final Int64 INT64 = new AutoValue_Type_Int64();
  private static final Float64 FLOAT64 = new AutoValue_Type_Float64();
  private static final Float32 FLOAT32 = new AutoValue_Type_Float32();
  private static final Bool BOOL = new AutoValue_Type_Bool();
  private static final Timestamp TIMESTAMP = new AutoValue_Type_Timestamp();
  private static final Date DATE = new AutoValue_Type_Date();
  private static final SchemalessStruct SCHEMALESS_STRUCT = new AutoValue_Type_SchemalessStruct();
}
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java
index 8f08f82d8a..61f51924f1 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java
@@ -30,6 +30,7 @@
import com.google.api.gax.rpc.ServerStream;
import com.google.api.gax.rpc.ServerStreamingCallable;
import com.google.api.gax.rpc.UnaryCallable;
+import com.google.cloud.bigtable.data.v2.internal.ResultSetImpl;
import com.google.cloud.bigtable.data.v2.models.BulkMutation;
import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord;
import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation;
@@ -47,7 +48,10 @@
import com.google.cloud.bigtable.data.v2.models.SampleRowKeysRequest;
import com.google.cloud.bigtable.data.v2.models.TableId;
import com.google.cloud.bigtable.data.v2.models.TargetId;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSet;
+import com.google.cloud.bigtable.data.v2.models.sql.Statement;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
+import com.google.cloud.bigtable.data.v2.stub.sql.SqlServerStream;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.protobuf.ByteString;
import java.io.IOException;
@@ -2610,6 +2614,34 @@ public void readChangeStreamAsync(
return stub.readChangeStreamCallable();
}
+ /**
+ * Executes a SQL Query and returns a ResultSet to iterate over the results. The returned
+ * ResultSet instance is not threadsafe, it can only be used from single thread.
+ *
+ * Sample code:
+ *
+ *
{@code
+ * try (BigtableDataClient bigtableDataClient = BigtableDataClient.create("[PROJECT]", "[INSTANCE]")) {
+ * String query = "SELECT CAST(cf['stringCol'] AS STRING) FROM [TABLE]";
+ *
+ * try (ResultSet resultSet = bigtableDataClient.executeQuery(Statement.of(query))) {
+ * while (resultSet.next()) {
+ * String s = resultSet.getString("stringCol");
+ * // do something with data
+ * }
+ * } catch (RuntimeException e) {
+ * e.printStackTrace();
+ * }
+ * }
+ *
+ * @see Statement For query options.
+ */
+ @BetaApi
+ public ResultSet executeQuery(Statement statement) {
+ SqlServerStream stream = stub.createExecuteQueryCallable().call(statement);
+ return ResultSetImpl.create(stream);
+ }
+
/** Close the clients and releases all associated resources. */
@Override
public void close() {
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/gapic_metadata.json b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/gapic_metadata.json
index 495762d219..1134631db2 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/gapic_metadata.json
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/gapic_metadata.json
@@ -13,6 +13,9 @@
"CheckAndMutateRow": {
"methods": ["checkAndMutateRow", "checkAndMutateRow", "checkAndMutateRow", "checkAndMutateRow", "checkAndMutateRow", "checkAndMutateRowCallable"]
},
+ "ExecuteQuery": {
+ "methods": ["executeQueryCallable"]
+ },
"GenerateInitialChangeStreamPartitions": {
"methods": ["generateInitialChangeStreamPartitionsCallable"]
},
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/AbstractProtoStructReader.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/AbstractProtoStructReader.java
new file mode 100644
index 0000000000..7035a8285d
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/AbstractProtoStructReader.java
@@ -0,0 +1,337 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.internal;
+
+import com.google.api.core.InternalApi;
+import com.google.bigtable.v2.Value;
+import com.google.bigtable.v2.Value.KindCase;
+import com.google.cloud.Date;
+import com.google.cloud.bigtable.data.v2.models.sql.SqlType;
+import com.google.cloud.bigtable.data.v2.models.sql.Struct;
+import com.google.cloud.bigtable.data.v2.models.sql.StructReader;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Timestamp;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.threeten.bp.Instant;
+
+@InternalApi
+public abstract class AbstractProtoStructReader implements StructReader {
+
+ abstract List values();
+
+ // Force subclasses to override equals and hashcode. We need this for tests.
+ public abstract boolean equals(Object other);
+
+ public abstract int hashCode();
+
+ /**
+ * @param columnName name of the column
+ * @return the index of the column named {@code columnName}
+ * @throws IllegalArgumentException if there is not exactly one column with the given name
+ */
+ public abstract int getColumnIndex(String columnName);
+
+ /**
+ * @param columnIndex index of the column
+ * @return the type of the column at the given index
+ */
+ public abstract SqlType> getColumnType(int columnIndex);
+
+ /**
+ * @param columnName name of the column
+ * @return the type of the column with the given name
+ * @throws IllegalArgumentException if there is not exactly one column with the given name
+ */
+ public SqlType> getColumnType(String columnName) {
+ return getColumnType(getColumnIndex(columnName));
+ }
+
+ @Override
+ public boolean isNull(int columnIndex) {
+ Value value = values().get(columnIndex);
+ return value.getKindCase().equals(KindCase.KIND_NOT_SET);
+ }
+
+ @Override
+ public boolean isNull(String columnName) {
+ return isNull(getColumnIndex(columnName));
+ }
+
+ @Override
+ public ByteString getBytes(int columnIndex) {
+ checkNonNullOfType(columnIndex, SqlType.bytes(), columnIndex);
+ Value value = values().get(columnIndex);
+ return value.getBytesValue();
+ }
+
+ @Override
+ public ByteString getBytes(String columnName) {
+ int columnIndex = getColumnIndex(columnName);
+ checkNonNullOfType(columnIndex, SqlType.bytes(), columnName);
+ Value value = values().get(columnIndex);
+ return value.getBytesValue();
+ }
+
+ @Override
+ public String getString(int columnIndex) {
+ checkNonNullOfType(columnIndex, SqlType.string(), columnIndex);
+ Value value = values().get(columnIndex);
+ return value.getStringValue();
+ }
+
+ @Override
+ public String getString(String columnName) {
+ int columnIndex = getColumnIndex(columnName);
+ checkNonNullOfType(columnIndex, SqlType.string(), columnName);
+ Value value = values().get(columnIndex);
+ return value.getStringValue();
+ }
+
+ @Override
+ public long getLong(int columnIndex) {
+ checkNonNullOfType(columnIndex, SqlType.int64(), columnIndex);
+ Value value = values().get(columnIndex);
+ return value.getIntValue();
+ }
+
+ @Override
+ public long getLong(String columnName) {
+ int columnIndex = getColumnIndex(columnName);
+ checkNonNullOfType(columnIndex, SqlType.int64(), columnName);
+ Value value = values().get(columnIndex);
+ return value.getIntValue();
+ }
+
+ @Override
+ public double getDouble(int columnIndex) {
+ checkNonNullOfType(columnIndex, SqlType.float64(), columnIndex);
+ Value value = values().get(columnIndex);
+ return value.getFloatValue();
+ }
+
+ @Override
+ public double getDouble(String columnName) {
+ int columnIndex = getColumnIndex(columnName);
+ checkNonNullOfType(columnIndex, SqlType.float64(), columnName);
+ Value value = values().get(columnIndex);
+ return value.getFloatValue();
+ }
+
+ @Override
+ public float getFloat(int columnIndex) {
+ checkNonNullOfType(columnIndex, SqlType.float32(), columnIndex);
+ Value value = values().get(columnIndex);
+ return (float) value.getFloatValue();
+ }
+
+ @Override
+ public float getFloat(String columnName) {
+ int columnIndex = getColumnIndex(columnName);
+ checkNonNullOfType(columnIndex, SqlType.float32(), columnName);
+ Value value = values().get(columnIndex);
+ return (float) value.getFloatValue();
+ }
+
+ @Override
+ public boolean getBoolean(int columnIndex) {
+ checkNonNullOfType(columnIndex, SqlType.bool(), columnIndex);
+ Value value = values().get(columnIndex);
+ return value.getBoolValue();
+ }
+
+ @Override
+ public boolean getBoolean(String columnName) {
+ int columnIndex = getColumnIndex(columnName);
+ checkNonNullOfType(columnIndex, SqlType.bool(), columnName);
+ Value value = values().get(columnIndex);
+ return value.getBoolValue();
+ }
+
+ @Override
+ public Instant getTimestamp(int columnIndex) {
+ checkNonNullOfType(columnIndex, SqlType.timestamp(), columnIndex);
+ Value value = values().get(columnIndex);
+ return toInstant(value.getTimestampValue());
+ }
+
+ @Override
+ public Instant getTimestamp(String columnName) {
+ int columnIndex = getColumnIndex(columnName);
+ checkNonNullOfType(columnIndex, SqlType.timestamp(), columnName);
+ Value value = values().get(columnIndex);
+ return toInstant(value.getTimestampValue());
+ }
+
+ @Override
+ public Date getDate(int columnIndex) {
+ checkNonNullOfType(columnIndex, SqlType.date(), columnIndex);
+ Value value = values().get(columnIndex);
+ return fromProto(value.getDateValue());
+ }
+
+ @Override
+ public Date getDate(String columnName) {
+ int columnIndex = getColumnIndex(columnName);
+ checkNonNullOfType(columnIndex, SqlType.date(), columnName);
+ Value value = values().get(columnIndex);
+ return fromProto(value.getDateValue());
+ }
+
+ @Override
+ public Struct getStruct(int columnIndex) {
+ checkNonNullOfType(columnIndex, SqlType.struct(), columnIndex);
+ Value value = values().get(columnIndex);
+ SqlType.Struct schema = (SqlType.Struct) getColumnType(columnIndex);
+ // A struct value is represented as an array
+ return ProtoStruct.create(schema, value.getArrayValue());
+ }
+
+ @Override
+ public Struct getStruct(String columnName) {
+ int columnIndex = getColumnIndex(columnName);
+ checkNonNullOfType(columnIndex, SqlType.struct(), columnName);
+ Value value = values().get(columnIndex);
+ SqlType.Struct schema = (SqlType.Struct) getColumnType(columnIndex);
+ // A struct value is represented as an array
+ return ProtoStruct.create(schema, value.getArrayValue());
+ }
+
+ @Override
+ public List getList(int columnIndex, SqlType.Array arrayType) {
+ // Note it is important that we use the actualType to decode bc user passed struct types
+ // won't have schemas
+ SqlType> actualType = getColumnType(columnIndex);
+ checkNonNullOfType(columnIndex, arrayType, actualType, columnIndex);
+ Value value = values().get(columnIndex);
+ return (List) decodeValue(value, actualType);
+ }
+
+ @Override
+ public List getList(String columnName, SqlType.Array arrayType) {
+ int columnIndex = getColumnIndex(columnName);
+ // Note it is important that we use the actualType to decode bc user passed struct types
+ // won't have schemas
+ SqlType> actualType = getColumnType(columnIndex);
+ checkNonNullOfType(columnIndex, arrayType, actualType, columnName);
+ Value value = values().get(columnIndex);
+ return (List) decodeValue(value, actualType);
+ }
+
+ @Override
+ public Map getMap(int columnIndex, SqlType.Map mapType) {
+ // Note it is important that we use the actualType to decode bc user passed struct types
+ // won't have schemas
+ SqlType> actualType = getColumnType(columnIndex);
+ checkNonNullOfType(columnIndex, mapType, actualType, columnIndex);
+ Value value = values().get(columnIndex);
+ return (Map) decodeValue(value, actualType);
+ }
+
+ @Override
+ public Map getMap(String columnName, SqlType.Map mapType) {
+ int columnIndex = getColumnIndex(columnName);
+ // Note it is important that we use the actualType to decode bc user passed struct types
+ // won't have schemas
+ SqlType> actualType = getColumnType(columnIndex);
+ checkNonNullOfType(columnIndex, mapType, actualType, columnName);
+ Value value = values().get(columnIndex);
+ return (Map) decodeValue(value, actualType);
+ }
+
+ Object decodeValue(Value value, SqlType> type) {
+ if (value.getKindCase().equals(KindCase.KIND_NOT_SET)) {
+ return null;
+ }
+ switch (type.getCode()) {
+ case BYTES:
+ return value.getBytesValue();
+ case STRING:
+ return value.getStringValue();
+ case INT64:
+ return value.getIntValue();
+ case FLOAT64:
+ case FLOAT32:
+ return value.getFloatValue();
+ case BOOL:
+ return value.getBoolValue();
+ case TIMESTAMP:
+ return toInstant(value.getTimestampValue());
+ case DATE:
+ return fromProto(value.getDateValue());
+ case STRUCT:
+ SqlType.Struct schema = (SqlType.Struct) type;
+ // A struct value is represented as an array
+ return ProtoStruct.create(schema, value.getArrayValue());
+ case ARRAY:
+ ArrayList listBuilder = new ArrayList<>();
+ SqlType.Array> arrayType = (SqlType.Array>) type;
+ SqlType> elemType = arrayType.getElementType();
+ for (Value elem : value.getArrayValue().getValuesList()) {
+ listBuilder.add(decodeValue(elem, elemType));
+ }
+ // We use unmodifiableList instead of guava ImmutableList to allow null elements
+ return Collections.unmodifiableList(listBuilder);
+ case MAP:
+ HashMap mapBuilder = new HashMap<>();
+ SqlType.Map, ?> mapType = (SqlType.Map, ?>) type;
+ SqlType> keyType = mapType.getKeyType();
+ SqlType> valType = mapType.getValueType();
+ // A map value is represented as an array of k, v tuples where the tuple is a nested array
+ for (Value entry : value.getArrayValue().getValuesList()) {
+ Object key = decodeValue(entry.getArrayValue().getValues(0), keyType);
+ Object val = decodeValue(entry.getArrayValue().getValues(1), valType);
+ mapBuilder.put(key, val);
+ }
+ // We use unmodifiableMap instead of guava ImmutableMap to allow null keys & values
+ return Collections.unmodifiableMap(mapBuilder);
+ default:
+ // We should have already thrown an exception in the SqlRowMerger
+ throw new IllegalStateException("Unrecognized type: " + type);
+ }
+ }
+
+ private void checkNonNullOfType(
+ int columnIndex, SqlType> expectedType, Object columnNameForError) {
+ SqlType> actualType = getColumnType(columnIndex);
+ checkNonNullOfType(columnIndex, expectedType, actualType, columnNameForError);
+ }
+
+ private void checkNonNullOfType(
+ int columnIndex, SqlType> expectedType, SqlType> actualType, Object columnNameForError) {
+ Preconditions.checkState(
+ SqlType.typesMatch(expectedType, actualType),
+ "Column %s is not of correct type: expected %s but was %s",
+ columnNameForError,
+ expectedType.toString(),
+ actualType.toString());
+ if (isNull(columnIndex)) {
+ throw new NullPointerException("Column " + columnNameForError + " contains NULL value");
+ }
+ }
+
+ private Instant toInstant(Timestamp timestamp) {
+ return Instant.ofEpochSecond(timestamp.getSeconds(), timestamp.getNanos());
+ }
+
+ private Date fromProto(com.google.type.Date proto) {
+ return Date.fromYearMonthDay(proto.getYear(), proto.getMonth(), proto.getDay());
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ColumnMetadataImpl.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ColumnMetadataImpl.java
new file mode 100644
index 0000000000..966cca5e60
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ColumnMetadataImpl.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.internal;
+
+import com.google.api.core.InternalApi;
+import com.google.auto.value.AutoValue;
+import com.google.cloud.bigtable.data.v2.models.sql.ColumnMetadata;
+import com.google.cloud.bigtable.data.v2.models.sql.SqlType;
+
+/**
+ * Implementation of {@link ColumnMetadata} using AutoValue
+ *
+ * This is considered an internal implementation detail and not meant to be used by applications.
+ */
+@InternalApi("For internal use only")
+@AutoValue
+public abstract class ColumnMetadataImpl implements ColumnMetadata {
+ public static ColumnMetadata create(String name, SqlType> type) {
+ return new AutoValue_ColumnMetadataImpl(name, type);
+ }
+
+ static ColumnMetadata fromProto(com.google.bigtable.v2.ColumnMetadata proto) {
+ return create(proto.getName(), SqlType.fromProto(proto.getType()));
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ColumnToIndexMapper.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ColumnToIndexMapper.java
new file mode 100644
index 0000000000..aec1c5897c
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ColumnToIndexMapper.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.internal;
+
+import com.google.api.core.InternalApi;
+import com.google.cloud.bigtable.data.v2.models.sql.ColumnMetadata;
+import com.google.common.collect.ImmutableMap;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This is an internal helper to share the index to column name lookup and the handling of ambiguous
+ * columns described below for Rows and Structs
+ */
+@InternalApi
+public abstract class ColumnToIndexMapper {
+ // It is valid for a select query to return columns with the same name. This marker is used
+ // internally in the client to designate that getting a value by column name is invalid and will
+ // be converted into an exception.
+ private static final int AMBIGUOUS_FIELD_MARKER = -1;
+
+ private Map columnNameMapping;
+
+ protected ColumnToIndexMapper(List extends ColumnMetadata> columns) {
+ columnNameMapping = buildColumnNameMapping(columns);
+ }
+
+ public int getColumnIndex(String columnName) {
+ Integer index = columnNameMapping.get(columnName);
+ if (index == null) {
+ throw new IllegalArgumentException("Column name not found: " + columnName);
+ }
+ int unboxedIndex = index;
+ if (unboxedIndex == AMBIGUOUS_FIELD_MARKER) {
+ throw new IllegalArgumentException(
+ "Ambiguous column name: " + columnName + ". Same name is used for multiple columns.");
+ }
+ return unboxedIndex;
+ }
+
+ private Map buildColumnNameMapping(List extends ColumnMetadata> columns) {
+ HashMap mapping = new HashMap<>(columns.size());
+ for (int i = 0; i < columns.size(); i++) {
+ String columnName = columns.get(i).name();
+ if (mapping.containsKey(columnName)) {
+ mapping.put(columnName, AMBIGUOUS_FIELD_MARKER);
+ } else {
+ mapping.put(columnName, i);
+ }
+ }
+ return ImmutableMap.copyOf(mapping);
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ProtoResultSetMetadata.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ProtoResultSetMetadata.java
new file mode 100644
index 0000000000..36bbdf5008
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ProtoResultSetMetadata.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.internal;
+
+import com.google.api.core.InternalApi;
+import com.google.bigtable.v2.ProtoSchema;
+import com.google.bigtable.v2.ResultSetMetadata.SchemaCase;
+import com.google.bigtable.v2.Type;
+import com.google.cloud.bigtable.data.v2.models.sql.ColumnMetadata;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+import com.google.cloud.bigtable.data.v2.models.sql.SqlType;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import java.util.List;
+import javax.annotation.Nullable;
+
+/**
+ * Implementation of {@link ResultSetMetadata} using an underlying protobuf schema.
+ *
+ * <p>This is considered an internal implementation detail and not meant to be used by
+ * applications.
+ */
+@InternalApi
+public class ProtoResultSetMetadata extends ColumnToIndexMapper implements ResultSetMetadata {
+  private final List<ColumnMetadata> columns;
+
+  public static ResultSetMetadata create(List<ColumnMetadata> columns) {
+    return new ProtoResultSetMetadata(columns);
+  }
+
+  private ProtoResultSetMetadata(List<ColumnMetadata> columns) {
+    super(columns);
+    this.columns = ImmutableList.copyOf(columns);
+  }
+
+  @Override
+  public List<ColumnMetadata> getColumns() {
+    return columns;
+  }
+
+  @Override
+  public SqlType<?> getColumnType(int columnIndex) {
+    return columns.get(columnIndex).type();
+  }
+
+  @Override
+  public SqlType<?> getColumnType(String columnName) {
+    return getColumnType(getColumnIndex(columnName));
+  }
+
+  /**
+   * Converts the protobuf representation of result set metadata, validating that it contains a
+   * non-empty proto schema with a type set for every column.
+   */
+  @InternalApi
+  public static ResultSetMetadata fromProto(com.google.bigtable.v2.ResultSetMetadata proto) {
+    Preconditions.checkState(
+        proto.getSchemaCase().equals(SchemaCase.PROTO_SCHEMA),
+        "Unsupported schema type: %s",
+        proto.getSchemaCase().name());
+    ProtoSchema schema = proto.getProtoSchema();
+    validateSchema(schema);
+    ImmutableList.Builder<ColumnMetadata> columnsBuilder = ImmutableList.builder();
+    for (com.google.bigtable.v2.ColumnMetadata protoColumn : schema.getColumnsList()) {
+      columnsBuilder.add(ColumnMetadataImpl.fromProto(protoColumn));
+    }
+    return create(columnsBuilder.build());
+  }
+
+  private static void validateSchema(ProtoSchema schema) {
+    List<com.google.bigtable.v2.ColumnMetadata> columns = schema.getColumnsList();
+    Preconditions.checkState(!columns.isEmpty(), "columns cannot be empty");
+    for (com.google.bigtable.v2.ColumnMetadata column : columns) {
+      Preconditions.checkState(
+          column.getType().getKindCase() != Type.KindCase.KIND_NOT_SET,
+          "Column type cannot be empty");
+    }
+  }
+
+  @Override
+  public boolean equals(@Nullable Object other) {
+    if (other instanceof ProtoResultSetMetadata) {
+      ProtoResultSetMetadata o = (ProtoResultSetMetadata) other;
+      // columnNameMapping is derived from columns, so we only need to compare columns
+      return columns.equals(o.columns);
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    // Must stay consistent with equals, which compares only columns.
+    return columns.hashCode();
+  }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ProtoSqlRow.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ProtoSqlRow.java
new file mode 100644
index 0000000000..3a63fe089a
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ProtoSqlRow.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.internal;
+
+import com.google.api.core.InternalApi;
+import com.google.auto.value.AutoValue;
+import com.google.bigtable.v2.Value;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+import com.google.cloud.bigtable.data.v2.models.sql.SqlType;
+import java.util.List;
+
+@InternalApi
+@AutoValue
+public abstract class ProtoSqlRow extends AbstractProtoStructReader implements SqlRow {
+  /**
+   * Creates a new SqlRow
+   *
+   * @param metadata the {@link ResultSetMetadata} for the results
+   * @param values list of the values for each column
+   */
+  public static ProtoSqlRow create(ResultSetMetadata metadata, List<Value> values) {
+    return new AutoValue_ProtoSqlRow(values, metadata);
+  }
+
+  /** {@link ResultSetMetadata} describing the schema of the row. */
+  abstract ResultSetMetadata metadata();
+
+  @Override
+  public int getColumnIndex(String columnName) {
+    return metadata().getColumnIndex(columnName);
+  }
+
+  @Override
+  public SqlType<?> getColumnType(int columnIndex) {
+    return metadata().getColumnType(columnIndex);
+  }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ProtoStruct.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ProtoStruct.java
new file mode 100644
index 0000000000..f9da3ef9fb
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ProtoStruct.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.internal;
+
+import com.google.api.core.InternalApi;
+import com.google.auto.value.AutoValue;
+import com.google.bigtable.v2.ArrayValue;
+import com.google.bigtable.v2.Value;
+import com.google.cloud.bigtable.data.v2.models.sql.SqlType;
+import com.google.cloud.bigtable.data.v2.models.sql.Struct;
+import java.util.List;
+
+/**
+ * Implementation of a {@link Struct} backed by protobuf {@link Value}s.
+ *
+ * <p>This is considered an internal implementation detail and not meant to be used by
+ * applications.
+ */
+@InternalApi("For internal use only")
+@AutoValue
+public abstract class ProtoStruct extends AbstractProtoStructReader implements Struct {
+
+  @InternalApi
+  static ProtoStruct create(SqlType.Struct type, ArrayValue fieldValues) {
+    return new AutoValue_ProtoStruct(type, fieldValues);
+  }
+
+  protected abstract SqlType.Struct type();
+
+  protected abstract ArrayValue fieldValues();
+
+  @Override
+  List<Value> values() {
+    return fieldValues().getValuesList();
+  }
+
+  @Override
+  public int getColumnIndex(String columnName) {
+    return type().getColumnIndex(columnName);
+  }
+
+  @Override
+  public SqlType<?> getColumnType(int columnIndex) {
+    return type().getType(columnIndex);
+  }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ResultSetImpl.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ResultSetImpl.java
new file mode 100644
index 0000000000..1d2bd37f2d
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/ResultSetImpl.java
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.internal;
+
+import com.google.api.core.ApiFuture;
+import com.google.api.core.InternalApi;
+import com.google.api.gax.rpc.ApiExceptions;
+import com.google.api.gax.rpc.ServerStream;
+import com.google.cloud.Date;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSet;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+import com.google.cloud.bigtable.data.v2.models.sql.SqlType;
+import com.google.cloud.bigtable.data.v2.models.sql.Struct;
+import com.google.cloud.bigtable.data.v2.models.sql.StructReader;
+import com.google.cloud.bigtable.data.v2.stub.sql.SqlServerStream;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import org.threeten.bp.Instant;
+
+/**
+ * The primary implementation of a ResultSet.
+ *
+ * This passes through StructReader calls to each row rather than implementing
+ * AbstractProtoStructReader directly so that it can support different types of rows in the future.
+ *
+ *
This is considered an internal implementation detail and not meant to be used by applications.
+ */
+@InternalApi("For internal use only")
+public class ResultSetImpl implements ResultSet, StructReader {
+
+ private final ServerStream serverStream;
+ private final Iterator rowIterator;
+ private final ApiFuture metadataApiFuture;
+ private boolean consumed;
+ private SqlRow currentRow;
+
+ public static ResultSet create(SqlServerStream sqlServerStream) {
+ return new ResultSetImpl(sqlServerStream);
+ }
+
+ private ResultSetImpl(SqlServerStream sqlServerStream) {
+ this.serverStream = sqlServerStream.rows();
+ this.rowIterator = serverStream.iterator();
+ this.metadataApiFuture = sqlServerStream.metadataFuture();
+ this.consumed = false;
+ }
+
+ private SqlRow getCurrentRow() {
+ Preconditions.checkState(!consumed, "Attempted to access data from closed ResultSet");
+ Preconditions.checkState(currentRow != null, "Attempted to access data before calling next()");
+ return currentRow;
+ }
+
+ @Override
+ public boolean next() {
+ if (consumed) {
+ return false;
+ }
+ boolean hasNext = rowIterator.hasNext();
+ if (hasNext) {
+ currentRow = rowIterator.next();
+ } else {
+ consumed = true;
+ }
+ return hasNext;
+ }
+
+ @Override
+ public ResultSetMetadata getMetadata() {
+ return ApiExceptions.callAndTranslateApiException(metadataApiFuture);
+ }
+
+ @Override
+ public void close() {
+ // If the stream has been consumed we don't want to cancel because it could
+ // cancel the request before it receives response trailers.
+ if (!consumed) {
+ serverStream.cancel();
+ }
+ consumed = true;
+ }
+
+ @Override
+ public boolean isNull(int columnIndex) {
+ return getCurrentRow().isNull(columnIndex);
+ }
+
+ @Override
+ public boolean isNull(String columnName) {
+ return getCurrentRow().isNull(columnName);
+ }
+
+ @Override
+ public ByteString getBytes(int columnIndex) {
+ return getCurrentRow().getBytes(columnIndex);
+ }
+
+ @Override
+ public ByteString getBytes(String columnName) {
+ return getCurrentRow().getBytes(columnName);
+ }
+
+ @Override
+ public String getString(int columnIndex) {
+ return getCurrentRow().getString(columnIndex);
+ }
+
+ @Override
+ public String getString(String columnName) {
+ return getCurrentRow().getString(columnName);
+ }
+
+ @Override
+ public long getLong(int columnIndex) {
+ return getCurrentRow().getLong(columnIndex);
+ }
+
+ @Override
+ public long getLong(String columnName) {
+ return getCurrentRow().getLong(columnName);
+ }
+
+ @Override
+ public double getDouble(int columnIndex) {
+ return getCurrentRow().getDouble(columnIndex);
+ }
+
+ @Override
+ public double getDouble(String columnName) {
+ return getCurrentRow().getDouble(columnName);
+ }
+
+ @Override
+ public float getFloat(int columnIndex) {
+ return getCurrentRow().getFloat(columnIndex);
+ }
+
+ @Override
+ public float getFloat(String columnName) {
+ return getCurrentRow().getFloat(columnName);
+ }
+
+ @Override
+ public boolean getBoolean(int columnIndex) {
+ return getCurrentRow().getBoolean(columnIndex);
+ }
+
+ @Override
+ public boolean getBoolean(String columnName) {
+ return getCurrentRow().getBoolean(columnName);
+ }
+
+ @Override
+ public Instant getTimestamp(int columnIndex) {
+ return getCurrentRow().getTimestamp(columnIndex);
+ }
+
+ @Override
+ public Instant getTimestamp(String columnName) {
+ return getCurrentRow().getTimestamp(columnName);
+ }
+
+ @Override
+ public Date getDate(int columnIndex) {
+ return getCurrentRow().getDate(columnIndex);
+ }
+
+ @Override
+ public Date getDate(String columnName) {
+ return getCurrentRow().getDate(columnName);
+ }
+
+ @Override
+ public Struct getStruct(int columnIndex) {
+ return getCurrentRow().getStruct(columnIndex);
+ }
+
+ @Override
+ public Struct getStruct(String columnName) {
+ return getCurrentRow().getStruct(columnName);
+ }
+
+ @Override
+ public List getList(int columnIndex, SqlType.Array arrayType) {
+ return getCurrentRow().getList(columnIndex, arrayType);
+ }
+
+ @Override
+ public List getList(String columnName, SqlType.Array arrayType) {
+ return getCurrentRow().getList(columnName, arrayType);
+ }
+
+ @Override
+ public Map getMap(int columnIndex, SqlType.Map mapType) {
+ return getCurrentRow().getMap(columnIndex, mapType);
+ }
+
+ @Override
+ public Map getMap(String columnName, SqlType.Map mapType) {
+ return getCurrentRow().getMap(columnName, mapType);
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/SqlRow.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/SqlRow.java
new file mode 100644
index 0000000000..6ddde59155
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/SqlRow.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.internal;
+
+import com.google.api.core.InternalApi;
+import com.google.cloud.bigtable.data.v2.models.sql.StructReader;
+import java.io.Serializable;
+
+/**
+ * Internal implementation detail that provides access to row data for SQL requests.
+ *
+ * <p>Extends {@link StructReader} for typed access to column values, and {@link Serializable} so
+ * row implementations can be serialized.
+ */
+@InternalApi
+public interface SqlRow extends StructReader, Serializable {}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/SqlRowMergerUtil.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/SqlRowMergerUtil.java
new file mode 100644
index 0000000000..edb8cf6dcf
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/SqlRowMergerUtil.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.internal;
+
+import com.google.api.core.BetaApi;
+import com.google.api.core.InternalApi;
+import com.google.bigtable.v2.ExecuteQueryResponse;
+import com.google.cloud.bigtable.data.v2.stub.sql.SqlRowMerger;
+import com.google.common.collect.ImmutableList;
+import java.util.List;
+
+/**
+ * Wrapper around {@link SqlRowMerger} that provides an easy way to transform a set of
+ * ExecuteQueryResponses into rows. Must create a new instance per ExecuteQueryRequest, and pass in
+ * the response stream of ExecuteQueryResponses in the order they were received.
+ */
+@InternalApi("For internal use only")
+@BetaApi
+public class SqlRowMergerUtil implements AutoCloseable {
+
+  private final SqlRowMerger merger;
+
+  public SqlRowMergerUtil() {
+    merger = new SqlRowMerger();
+  }
+
+  /** @throws IllegalStateException if partial row data has been buffered but never completed */
+  @Override
+  public void close() {
+    if (merger.hasPartialFrame()) {
+      throw new IllegalStateException("Tried to close SqlRowMerger with unconsumed partial data");
+    }
+  }
+
+  /**
+   * Transforms a list of {@link ExecuteQueryResponse} objects into a list of {@link
+   * com.google.cloud.bigtable.data.v2.internal.ProtoSqlRow} objects. The first call must contain
+   * the ResultSetMetadata as the first ExecuteQueryResponse. This will return any complete {@link
+   * com.google.cloud.bigtable.data.v2.internal.ProtoSqlRow}s from the given responses and buffer
+   * partial rows waiting for the next ExecuteQueryResponse.
+   *
+   * @param responses List of {@link ExecuteQueryResponse} for a query
+   * @return a list of the complete {@link com.google.cloud.bigtable.data.v2.internal.ProtoSqlRow}s
+   *     that have been merged from the given responses.
+   */
+  public List<SqlRow> parseExecuteQueryResponses(ImmutableList<ExecuteQueryResponse> responses) {
+    ImmutableList.Builder<SqlRow> rows = new ImmutableList.Builder<>();
+
+    for (ExecuteQueryResponse response : responses) {
+      merger.push(response);
+      // Drain every row that became complete after this response.
+      while (merger.hasFullFrame()) {
+        rows.add(merger.pop());
+      }
+    }
+    return rows.build();
+  }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/ColumnMetadata.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/ColumnMetadata.java
new file mode 100644
index 0000000000..0a722a914d
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/ColumnMetadata.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.models.sql;
+
+import com.google.api.core.BetaApi;
+
+/** Represents the metadata for a column in a {@link ResultSet} */
+@BetaApi
+public interface ColumnMetadata {
+  /** The name of the column. Returns an empty string if the column has no name */
+  String name();
+
+  /** The {@link SqlType} of the column */
+  SqlType<?> type();
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/ResultSet.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/ResultSet.java
new file mode 100644
index 0000000000..807e995712
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/ResultSet.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.models.sql;
+
+import com.google.api.core.BetaApi;
+
+/**
+ * A set of SQL data, generated as the result of an ExecuteQuery request.
+ *
+ * <p>This allows access to the data of one row at a time using the methods from the {@code
+ * StructReader} interface. The rows are read in the order of the query results. To advance to the
+ * next row call {@link #next}. This returns {@code false} once all the rows have been iterated
+ * over. The result set is initially positioned before the first row, so {@link #next} must be
+ * called before reading any data.
+ *
+ * <p>{@link #getMetadata()} may be called before calling next. It will block until the metadata
+ * has been received.
+ *
+ * <p>{@code ResultSet} implementations may buffer data ahead and/or maintain a persistent
+ * streaming connection to the remote service until all data has been returned or the resultSet
+ * closed. As such, it is important that all uses of {@code ResultSet} either fully consume it
+ * (that is, call {@code next()} until {@code false} is returned or it throws an exception) or
+ * explicitly call {@link #close()}: failure to do so may result in wasted work or leaked
+ * resources.
+ *
+ * <p>{@code ResultSet} implementations are not required to be thread-safe: the thread that asked
+ * for a ResultSet must be the one that interacts with it.
+ */
+@BetaApi
+public interface ResultSet extends StructReader, AutoCloseable {
+
+  /**
+   * Advances the result set to the next row, returning {@code false} if no such row exists. Calls
+   * to data access methods will throw an exception after next has returned {@code false}.
+   */
+  boolean next();
+
+  /**
+   * Returns the {@link ResultSetMetadata} for the ResultSet. Blocks until the underlying request
+   * receives the metadata.
+   */
+  ResultSetMetadata getMetadata();
+
+  /**
+   * Closes the result set and cancels the underlying request if it is still open. This must always
+   * be called when disposing of a {@code ResultSet} before {@link #next()} has returned {@code
+   * false} or raised an exception. Calling {@code close()} is also allowed if the result set has
+   * been fully consumed, so a recommended practice is to unconditionally close the result set once
+   * it is done with, typically using a try-with-resources construct.
+   */
+  @Override
+  void close();
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/ResultSetMetadata.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/ResultSetMetadata.java
new file mode 100644
index 0000000000..23e7155e67
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/ResultSetMetadata.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.models.sql;
+
+import com.google.api.core.BetaApi;
+import java.util.List;
+
+/** Provides information about the schema of a {@link ResultSet}. */
+@BetaApi
+public interface ResultSetMetadata {
+
+  /** @return full list of {@link ColumnMetadata} for each column in the {@link ResultSet}. */
+  List<ColumnMetadata> getColumns();
+
+  /**
+   * @param columnIndex index of the column
+   * @return the {@link SqlType} of the column at the given index
+   */
+  SqlType<?> getColumnType(int columnIndex);
+
+  /**
+   * @param columnName name of the column
+   * @return the {@link SqlType} of the column with the given name
+   * @throws IllegalArgumentException if there is no column with the name *or* if there are
+   *     multiple columns with the given name
+   */
+  SqlType<?> getColumnType(String columnName);
+
+  /**
+   * @param columnName name of the column
+   * @return index of the column with the given name
+   * @throws IllegalArgumentException if there is no column with the name *or* if there are
+   *     multiple columns with the given name
+   */
+  int getColumnIndex(String columnName);
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/SqlType.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/SqlType.java
new file mode 100644
index 0000000000..50146f292a
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/SqlType.java
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.models.sql;
+
+import com.google.api.core.BetaApi;
+import com.google.api.core.InternalApi;
+import com.google.cloud.Date;
+import com.google.cloud.bigtable.common.Type;
+import com.google.cloud.bigtable.common.Type.SchemalessStruct;
+import com.google.cloud.bigtable.common.Type.StructWithSchema;
+import com.google.protobuf.ByteString;
+import java.io.Serializable;
+import java.util.List;
+import org.threeten.bp.Instant;
+
+/**
+ * Represents a data type in a SQL query.
+ *
+ * Complex types ({@link SqlType.Map}, {@link SqlType.Array}, & {@link SqlType.Struct} provide
+ * additional information about the schema of the type.
+ *
+ * @param the corresponding java type
+ */
+@BetaApi
+public interface SqlType extends Serializable {
+
+ /* Enumeration of the types */
+ enum Code {
+ BYTES,
+ STRING,
+ INT64,
+ FLOAT64,
+ FLOAT32,
+ BOOL,
+ TIMESTAMP,
+ DATE,
+ STRUCT,
+ ARRAY,
+ MAP
+ }
+
+ /** @return {@link Code} enum for this type */
+ Code getCode();
+
+ /**
+ * Represents a map type in SQL. Provides access to the key and value types for the map.
+ *
+ * @param Java type of the Map key data
+ * @param Java type of the Map value data
+ */
+ interface Map extends SqlType> {
+ /** @return {@link SqlType} of the map's key */
+ SqlType getKeyType();
+
+ /** @return {@link SqlType} of the map's value */
+ SqlType getValueType();
+ }
+
+ /**
+ * Represents an array type in SQL. Provides access to the element type of the array.
+ *
+ * @param Java type of the Array element data
+ */
+ interface Array extends SqlType> {
+ /** @return {@link SqlType} of the array's elements */
+ SqlType getElementType();
+ }
+
+ /**
+ * Represents a struct type in SQL. A struct is an ordered collection of named and type fields.
+ */
+ interface Struct extends SqlType {
+ // This extends ColumnMetadata so that we can reuse some helpers for both types
+ /** Represents a field in a struct */
+ interface Field extends ColumnMetadata {
+ /** @return the name of the field. Returns an empty string for fields without names. */
+ String name();
+
+ /** @return the {@link SqlType} of the field */
+ SqlType> type();
+ }
+
+ /** @return the ordered list of {@link Field}s for the struct */
+ List extends Field> getFields();
+
+ /**
+ * @param fieldIndex index of the field
+ * @return the {@link SqlType} of the field at the given index
+ */
+ SqlType> getType(int fieldIndex);
+
+ /**
+ * @param fieldName name of the field
+ * @return the {@link SqlType} of the field with the given name
+ * @throws IllegalArgumentException if there is no field with the name *or* if there are
+ * multiple columns with the given name
+ */
+ SqlType> getType(String fieldName);
+
+ /**
+ * @param fieldName name of the field
+ * @return the field index of the field with the given name
+ * @throws IllegalArgumentException if there is no field with the name *or* if there are
+ * multiple columns with the given name
+ */
+ int getColumnIndex(String fieldName);
+ }
+
+ /** returns a {@link SqlType} for the {@code BYTES} type. */
+ static SqlType bytes() {
+ return Type.Bytes.create();
+ }
+
+ /** returns a {@link SqlType} for the {@code STRING} type. */
+ static SqlType string() {
+ return Type.String.create();
+ }
+
+ /** returns a {@link SqlType} for the {@code INT64} type. */
+ static SqlType int64() {
+ return Type.Int64.create();
+ }
+
+ /** returns a {@link SqlType} for the {@code FLOAT64} type. */
+ static SqlType float64() {
+ return Type.Float64.create();
+ }
+
+ /** returns a {@link SqlType} for the {@code FLOAT32} type. */
+ static SqlType float32() {
+ return Type.Float32.create();
+ }
+
+ /** returns a {@link SqlType} for the {@code BOOL} type. */
+ static SqlType bool() {
+ return Type.Bool.create();
+ }
+
+ /** returns a {@link SqlType} for the {@code TIMESTAMP} type. */
+ static SqlType timestamp() {
+ return Type.Timestamp.create();
+ }
+
+ /** returns a {@link SqlType} for the {@code DATE} type. */
+ static SqlType date() {
+ return Type.Date.create();
+ }
+
+ /**
+ * returns a fake {@code STRUCT type} for use on in {@link StructReader} methods that require a
+ * {@link SqlType} to validate against. This does not specify a schema because the struct schem
+ * will be validated on calls to the structs data accessors.
+ *
+ * Attempts to access the schema of a struct created this way will throw exceptions.
+ *
+ *
Example usage:
+ *
{@code
+ * List structList = resultSet.getList("column", SqlType.arrayOf(SqlType.struct()));
+ * }
+ */
+ static SqlType.Struct struct() {
+ return SchemalessStruct.create();
+ }
+
+ /** returns a {@link SqlType} for an {@code ARRAY} with elements of type {@code elemType} */
+ static SqlType.Array arrayOf(SqlType elemType) {
+ return Type.Array.create(elemType);
+ }
+
+ /**
+ * returns a {@link SqlType} for a @code MAP} with keys of type {@code keyType} and values of type
+ * {@code valType}
+ */
+ static SqlType.Map mapOf(SqlType keyType, SqlType valType) {
+ return Type.Map.create(keyType, valType);
+ }
+
+ /**
+ * returns the {@link SqlType} for the type returned for column families in {@code with_history}
+ * queries. This is equivalent to {@code SqlType.mapOf(SqlType.bytes(),
+ * SqlType.arrayOf(SqlType.struct()))}
+ */
+ static SqlType.Map>
+ historicalMap() {
+ return mapOf(bytes(), arrayOf(struct()));
+ }
+
+ /**
+ * Creates a {@link SqlType} from the protobuf representation of Types.
+ *
+ * This is considered an internal implementation detail and not meant to be used by
+ * applications.
+ */
+ @InternalApi
+ static SqlType> fromProto(com.google.bigtable.v2.Type proto) {
+ switch (proto.getKindCase()) {
+ case BYTES_TYPE:
+ return bytes();
+ case STRING_TYPE:
+ return string();
+ case INT64_TYPE:
+ return int64();
+ case FLOAT64_TYPE:
+ return float64();
+ case FLOAT32_TYPE:
+ return float32();
+ case BOOL_TYPE:
+ return bool();
+ case TIMESTAMP_TYPE:
+ return timestamp();
+ case DATE_TYPE:
+ return date();
+ case STRUCT_TYPE:
+ return StructWithSchema.fromProto(proto.getStructType());
+ case ARRAY_TYPE:
+ return arrayOf(fromProto(proto.getArrayType().getElementType()));
+ case MAP_TYPE:
+ com.google.bigtable.v2.Type.Map mapType = proto.getMapType();
+ return mapOf(fromProto(mapType.getKeyType()), fromProto(mapType.getValueType()));
+ case KIND_NOT_SET:
+ throw new IllegalStateException("Unrecognized Type. You may need to update your client.");
+ default:
+ throw new IllegalStateException("Unexpected Type: " + proto.getKindCase().name());
+ }
+ }
+
+ /**
+ * This can be used to check whether {@link
+ * com.google.cloud.bigtable.data.v2.models.sql.StructReader} get calls are being called for the
+ * correct type when compared to the schema. This is different that equals because we do not
+ * require users to specify the full struct schema for struct get calls. This is safe because the
+ * struct schema will be validated on calls to the struct.
+ *
+ *
This is considered an internal implementation detail and not meant to be used by
+ * applications.
+ */
+ @InternalApi
+ static boolean typesMatch(SqlType> left, SqlType> right) {
+ switch (left.getCode()) {
+ case BYTES:
+ case STRING:
+ case INT64:
+ case FLOAT64:
+ case FLOAT32:
+ case BOOL:
+ case TIMESTAMP:
+ case DATE:
+ return left.equals(right);
+ case STRUCT:
+ // Don't validate fields since the field types will be validated on
+ // accessor calls to struct
+ return left.getCode().equals(right.getCode());
+ case ARRAY:
+ if (!left.getCode().equals(right.getCode())) {
+ return false;
+ }
+ SqlType.Array> leftArray = (SqlType.Array>) left;
+ SqlType.Array> rightArray = (SqlType.Array>) right;
+ return typesMatch(leftArray.getElementType(), rightArray.getElementType());
+ case MAP:
+ if (!left.getCode().equals(right.getCode())) {
+ return false;
+ }
+ SqlType.Map, ?> leftMap = (SqlType.Map, ?>) left;
+ SqlType.Map, ?> rightMap = (SqlType.Map, ?>) right;
+ boolean keysMatch = typesMatch(leftMap.getKeyType(), rightMap.getKeyType());
+ boolean valuesMatch = typesMatch(leftMap.getValueType(), rightMap.getValueType());
+ return keysMatch && valuesMatch;
+ default:
+ throw new IllegalStateException("Unexpected type: " + left);
+ }
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/Statement.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/Statement.java
new file mode 100644
index 0000000000..c68a9feec5
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/Statement.java
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.models.sql;
+
+import com.google.api.core.BetaApi;
+import com.google.api.core.InternalApi;
+import com.google.bigtable.v2.ExecuteQueryRequest;
+import com.google.bigtable.v2.Type;
+import com.google.bigtable.v2.Value;
+import com.google.cloud.Date;
+import com.google.cloud.bigtable.data.v2.internal.NameUtil;
+import com.google.cloud.bigtable.data.v2.internal.RequestContext;
+import com.google.common.collect.ImmutableMap;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Timestamp;
+import java.util.HashMap;
+import java.util.Map;
+import javax.annotation.Nullable;
+import org.threeten.bp.Instant;
+
+/**
+ * A SQL statement that can be executed by calling {@link
+ * com.google.cloud.bigtable.data.v2.BigtableDataClient#executeQuery(Statement)}.
+ *
+ * <p>A statement contains a SQL string and optional parameters. A parameterized query should
+ * contain placeholders in the form of {@literal @} followed by the parameter name. Parameter names
+ * may consist of any combination of letters, numbers, and underscores.
+ *
+ * <p>Parameters can appear anywhere that a literal value is expected. The same parameter name can
+ * be used more than once, for example: {@code WHERE cf["qualifier1"] = @value OR cf["qualifier2"]
+ * = @value }
+ *
+ * <p>It is an error to execute an SQL query with placeholders for unset parameters.
+ *
+ * <p>Parameterized Statements are constructed using a {@link Builder} and calling
+ * setTypeParam(String paramName, Type value) for the appropriate type. For example:
+ *
+ * <pre>{@code
+ * Statement statement = Statement
+ * .newBuilder("SELECT cf[@qualifer] FROM table WHERE _key=@key")
+ * .setBytesParam("qualifier", ByteString.copyFromUtf8("test"))
+ * .setBytesParam("key", ByteString.copyFromUtf8("testKey"))
+ * .build();
+ * }</pre>
+ */
+@BetaApi
+public class Statement {
+
+ private static final Type STRING_TYPE =
+ Type.newBuilder().setStringType(Type.String.getDefaultInstance()).build();
+ private static final Type BYTES_TYPE =
+ Type.newBuilder().setBytesType(Type.Bytes.getDefaultInstance()).build();
+ private static final Type INT64_TYPE =
+ Type.newBuilder().setInt64Type(Type.Int64.getDefaultInstance()).build();
+ private static final Type BOOL_TYPE =
+ Type.newBuilder().setBoolType(Type.Bool.getDefaultInstance()).build();
+ private static final Type TIMESTAMP_TYPE =
+ Type.newBuilder().setTimestampType(Type.Timestamp.getDefaultInstance()).build();
+ private static final Type DATE_TYPE =
+ Type.newBuilder().setDateType(Type.Date.getDefaultInstance()).build();
+
+ private final String sql;
+ private final Map<String, Value> params;
+
+ private Statement(String sql, Map<String, Value> params) {
+ this.sql = sql;
+ this.params = params;
+ }
+
+ /** Creates a {@code Statement} with the given SQL query and no query parameters. */
+ public static Statement of(String sql) {
+ return newBuilder(sql).build();
+ }
+
+ /** Creates a new {@code Builder} with the given SQL query */
+ public static Builder newBuilder(String sql) {
+ return new Builder(sql);
+ }
+
+ public static class Builder {
+ private final String sql;
+ private final Map<String, Value> params;
+
+ private Builder(String sql) {
+ this.sql = sql;
+ this.params = new HashMap<>();
+ }
+
+ /** Builds a {@code Statement} from the builder */
+ public Statement build() {
+ return new Statement(sql, ImmutableMap.copyOf(params));
+ }
+
+ /**
+ * Sets a query parameter with the name {@code paramName} and the String typed value {@code
+ * value}
+ */
+ public Builder setStringParam(String paramName, @Nullable String value) {
+ params.put(paramName, stringParamOf(value));
+ return this;
+ }
+
+ /**
+ * Sets a query parameter with the name {@code paramName} and the Bytes typed value {@code
+ * value}
+ */
+ public Builder setBytesParam(String paramName, @Nullable ByteString value) {
+ params.put(paramName, bytesParamOf(value));
+ return this;
+ }
+
+ /**
+ * Sets a query parameter with the name {@code paramName} and the INT64 typed value {@code
+ * value}
+ */
+ public Builder setLongParam(String paramName, @Nullable Long value) {
+ params.put(paramName, int64ParamOf(value));
+ return this;
+ }
+
+ /**
+ * Sets a query parameter with the name {@code paramName} and the BOOL typed value {@code value}
+ */
+ public Builder setBooleanParam(String paramName, @Nullable Boolean value) {
+ params.put(paramName, booleanParamOf(value));
+ return this;
+ }
+
+ /**
+ * Sets a query parameter with the name {@code paramName} and the TIMESTAMP typed value {@code
+ * value}
+ */
+ public Builder setTimestampParam(String paramName, @Nullable Instant value) {
+ params.put(paramName, timestampParamOf(value));
+ return this;
+ }
+
+ /**
+ * Sets a query parameter with the name {@code paramName} and the DATE typed value {@code value}
+ */
+ public Builder setDateParam(String paramName, @Nullable Date value) {
+ params.put(paramName, dateParamOf(value));
+ return this;
+ }
+
+ private static Value stringParamOf(@Nullable String value) {
+ Value.Builder builder = nullValueWithType(STRING_TYPE);
+ if (value != null) {
+ builder.setStringValue(value);
+ }
+ return builder.build();
+ }
+
+ private static Value bytesParamOf(@Nullable ByteString value) {
+ Value.Builder builder = nullValueWithType(BYTES_TYPE);
+ if (value != null) {
+ builder.setBytesValue(value);
+ }
+ return builder.build();
+ }
+
+ private static Value int64ParamOf(@Nullable Long value) {
+ Value.Builder builder = nullValueWithType(INT64_TYPE);
+ if (value != null) {
+ builder.setIntValue(value);
+ }
+ return builder.build();
+ }
+
+ private static Value booleanParamOf(@Nullable Boolean value) {
+ Value.Builder builder = nullValueWithType(BOOL_TYPE);
+ if (value != null) {
+ builder.setBoolValue(value);
+ }
+ return builder.build();
+ }
+
+ private static Value timestampParamOf(@Nullable Instant value) {
+ Value.Builder builder = nullValueWithType(TIMESTAMP_TYPE);
+ if (value != null) {
+ builder.setTimestampValue(
+ Timestamp.newBuilder()
+ .setSeconds(value.getEpochSecond())
+ .setNanos(value.getNano())
+ .build());
+ }
+ return builder.build();
+ }
+
+ private static Value dateParamOf(@Nullable Date value) {
+ Value.Builder builder = nullValueWithType(DATE_TYPE);
+ if (value != null) {
+ builder.setDateValue(
+ com.google.type.Date.newBuilder()
+ .setYear(value.getYear())
+ .setMonth(value.getMonth())
+ .setDay(value.getDayOfMonth())
+ .build());
+ }
+ return builder.build();
+ }
+
+ private static Value.Builder nullValueWithType(Type type) {
+ return Value.newBuilder().setType(type);
+ }
+ }
+
+ /**
+ * Creates the request protobuf. This method is considered an internal implementation detail and
+ * not meant to be used by applications.
+ */
+ @InternalApi("For internal use only")
+ public ExecuteQueryRequest toProto(RequestContext requestContext) {
+ return ExecuteQueryRequest.newBuilder()
+ .setInstanceName(
+ NameUtil.formatInstanceName(
+ requestContext.getProjectId(), requestContext.getInstanceId()))
+ .setAppProfileId(requestContext.getAppProfileId())
+ .setQuery(sql)
+ .putAllParams(params)
+ .build();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/Struct.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/Struct.java
new file mode 100644
index 0000000000..23b113f9f7
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/Struct.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.models.sql;
+
+import com.google.api.core.BetaApi;
+import java.io.Serializable;
+
+/**
+ * The representation of a SQL Struct type. Data can be accessed using the methods from the {@code
+ * StructReader} interface.
+ */
+@BetaApi
+public interface Struct extends StructReader, Serializable {}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/StructReader.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/StructReader.java
new file mode 100644
index 0000000000..8f450bbd92
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/sql/StructReader.java
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.models.sql;
+
+import com.google.api.core.BetaApi;
+import com.google.cloud.Date;
+import com.google.protobuf.ByteString;
+import java.util.List;
+import java.util.Map;
+import org.threeten.bp.Instant;
+
+/**
+ * An interface for reading the columns of a {@code Struct} or {@code
+ * com.google.cloud.bigtable.data.v2.models.sql.ResultSet}.
+ *
+ * <p>This provides accessors for each valid type in the form of {@code getTypeName()}. Attempting
+ * to call these methods for a column of another type will result in an {@code
+ * IllegalStateException}. Each method has an overload accepting both {@code int} column index and
+ * {@code String} column Name. Attempting to call an index-based method with a non-existent index
+ * will result in an {@code IndexOutOfBoundsException}. Attempting to call a columnName based getter
+ * with a column name that does not appear exactly once in the set of fields will result in an
+ * {@code IllegalArgumentException}. Attempting to access a column with a null value will result in
+ * a {@code NullPointerException}; {@link #isNull(int)} & {@link #isNull(String)} can be used to
+ * check for null values.
+ */
+@BetaApi
+public interface StructReader {
+ /**
+ * @param columnIndex index of the column
+ * @return {@code true} if the column contains a {@code NULL} value
+ */
+ boolean isNull(int columnIndex);
+
+ /**
+ * @param columnName name of the column
+ * @return {@code true} if the column contains a {@code NULL} value
+ * @throws IllegalArgumentException if there is not exactly one column with the given name
+ */
+ boolean isNull(String columnName);
+
+ /**
+ * @param columnIndex index of the column
+ * @return {@link ByteString} type value of a non-{@code NULL} column
+ */
+ ByteString getBytes(int columnIndex);
+
+ /**
+ * @param columnName name of the column
+ * @return {@link ByteString} type value of a non-{@code NULL} column
+ */
+ ByteString getBytes(String columnName);
+
+ /**
+ * @param columnIndex index of the column
+ * @return {@link String} type value of a non-{@code NULL} column
+ */
+ String getString(int columnIndex);
+
+ /**
+ * @param columnName name of the column
+ * @return {@link String} type value of a non-{@code NULL} column
+ */
+ String getString(String columnName);
+
+ /**
+ * @param columnIndex index of the column
+ * @return {@link long} type value of a non-{@code NULL} column
+ */
+ long getLong(int columnIndex);
+
+ /**
+ * @param columnName name of the column
+ * @return {@link long} type value of a non-{@code NULL} column
+ */
+ long getLong(String columnName);
+
+ /**
+ * Getter for FLOAT_64 type Sql data
+ *
+ * @param columnIndex index of the column
+ * @return {@link double} type value of a non-{@code NULL} column
+ */
+ double getDouble(int columnIndex);
+
+ /**
+ * Getter for FLOAT_64 type Sql data
+ *
+ * @param columnName name of the column
+ * @return {@link double} type value of a non-{@code NULL} column
+ */
+ double getDouble(String columnName);
+
+ /**
+ * Getter for FLOAT_32 type Sql data
+ *
+ * @param columnIndex index of the column
+ * @return {@link float} type value of a non-{@code NULL} column
+ */
+ float getFloat(int columnIndex);
+
+ /**
+ * Getter for FLOAT_32 type Sql data
+ *
+ * @param columnName name of the column
+ * @return {@link float} type value of a non-{@code NULL} column
+ */
+ float getFloat(String columnName);
+
+ /**
+ * @param columnIndex index of the column
+ * @return {@link boolean} type value of a non-{@code NULL} column
+ */
+ boolean getBoolean(int columnIndex);
+
+ /**
+ * @param columnName name of the column
+ * @return {@link boolean} type value of a non-{@code NULL} column
+ */
+ boolean getBoolean(String columnName);
+
+ /**
+ * @param columnIndex index of the column
+ * @return {@link Instant} type value of a non-{@code NULL} column
+ */
+ Instant getTimestamp(int columnIndex);
+
+ /**
+ * @param columnName name of the column
+ * @return {@link Instant} type value of a non-{@code NULL} column
+ */
+ Instant getTimestamp(String columnName);
+
+ /**
+ * @param columnIndex index of the column
+ * @return {@link Date} type value of a non-{@code NULL} column
+ */
+ Date getDate(int columnIndex);
+
+ /**
+ * @param columnName name of the column
+ * @return {@link Date} type value of a non-{@code NULL} column
+ */
+ Date getDate(String columnName);
+
+ /**
+ * @param columnIndex index of the column
+ * @return {@link com.google.cloud.bigtable.data.v2.models.sql.Struct} type value of a non-{@code
+ * NULL} column
+ */
+ Struct getStruct(int columnIndex);
+
+ /**
+ * @param columnName name of the column
+ * @return {@link com.google.cloud.bigtable.data.v2.models.sql.Struct} type value of a non-{@code
+ * NULL} column
+ */
+ Struct getStruct(String columnName);
+
+ /**
+ * @param columnIndex index of the column
+ * @return {@link List} type value of a non-{@code NULL} column
+ * @param <ElemType> Java type of the list elements
+ */
+ <ElemType> List<ElemType> getList(int columnIndex, SqlType.Array<ElemType> arrayType);
+
+ /**
+ * @param columnName name of the column
+ * @return {@link List} type value of a non-{@code NULL} column
+ * @param <ElemType> Java type of the list elements
+ */
+ <ElemType> List<ElemType> getList(String columnName, SqlType.Array<ElemType> arrayType);
+
+ /**
+ * @param columnIndex index of the column
+ * @return {@link Map} type value of a non-{@code NULL} column
+ * @param <K> Java type of the map keys
+ * @param <V> Java type of the map values
+ */
+ <K, V> Map<K, V> getMap(int columnIndex, SqlType.Map<K, V> mapType);
+
+ /**
+ * @param columnName name of the column
+ * @return {@link Map} type value of a non-{@code NULL} column
+ * @param <K> Java type of the map keys
+ * @param <V> Java type of the map values
+ */
+ <K, V> Map<K, V> getMap(String columnName, SqlType.Map<K, V> mapType);
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java
index 01bc5d9e85..9d887d6ccd 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java
@@ -22,6 +22,8 @@
import com.google.api.gax.rpc.UnaryCallable;
import com.google.bigtable.v2.CheckAndMutateRowRequest;
import com.google.bigtable.v2.CheckAndMutateRowResponse;
+import com.google.bigtable.v2.ExecuteQueryRequest;
+import com.google.bigtable.v2.ExecuteQueryResponse;
import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest;
import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse;
import com.google.bigtable.v2.MutateRowRequest;
@@ -90,6 +92,10 @@ public UnaryCallable pingAndWarmCallabl
throw new UnsupportedOperationException("Not implemented: readChangeStreamCallable()");
}
+ public ServerStreamingCallable<ExecuteQueryRequest, ExecuteQueryResponse> executeQueryCallable() {
+ throw new UnsupportedOperationException("Not implemented: executeQueryCallable()");
+ }
+
@Override
public abstract void close();
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java
index 2cfd109ebe..22d394fe57 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java
@@ -34,6 +34,8 @@
import com.google.api.gax.rpc.UnaryCallSettings;
import com.google.bigtable.v2.CheckAndMutateRowRequest;
import com.google.bigtable.v2.CheckAndMutateRowResponse;
+import com.google.bigtable.v2.ExecuteQueryRequest;
+import com.google.bigtable.v2.ExecuteQueryResponse;
import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest;
import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse;
import com.google.bigtable.v2.MutateRowRequest;
@@ -92,6 +94,8 @@ public class BigtableStubSettings extends StubSettings {
generateInitialChangeStreamPartitionsSettings;
private final ServerStreamingCallSettings
readChangeStreamSettings;
+ private final ServerStreamingCallSettings<ExecuteQueryRequest, ExecuteQueryResponse>
+ executeQuerySettings;
/** Returns the object with the settings used for calls to readRows. */
public ServerStreamingCallSettings readRowsSettings() {
@@ -147,6 +151,12 @@ public UnaryCallSettings pingAndWarmSet
return readChangeStreamSettings;
}
+ /** Returns the object with the settings used for calls to executeQuery. */
+ public ServerStreamingCallSettings<ExecuteQueryRequest, ExecuteQueryResponse>
+ executeQuerySettings() {
+ return executeQuerySettings;
+ }
+
public BigtableStub createStub() throws IOException {
if (getTransportChannelProvider()
.getTransportName()
@@ -236,6 +246,7 @@ protected BigtableStubSettings(Builder settingsBuilder) throws IOException {
generateInitialChangeStreamPartitionsSettings =
settingsBuilder.generateInitialChangeStreamPartitionsSettings().build();
readChangeStreamSettings = settingsBuilder.readChangeStreamSettings().build();
+ executeQuerySettings = settingsBuilder.executeQuerySettings().build();
}
/** Builder for BigtableStubSettings. */
@@ -261,6 +272,8 @@ public static class Builder extends StubSettings.Builder
readChangeStreamSettings;
+ private final ServerStreamingCallSettings.Builder<ExecuteQueryRequest, ExecuteQueryResponse>
+ executeQuerySettings;
private static final ImmutableMap>
RETRYABLE_CODE_DEFINITIONS;
@@ -373,6 +386,7 @@ protected Builder(ClientContext clientContext) {
readModifyWriteRowSettings = UnaryCallSettings.newUnaryCallSettingsBuilder();
generateInitialChangeStreamPartitionsSettings = ServerStreamingCallSettings.newBuilder();
readChangeStreamSettings = ServerStreamingCallSettings.newBuilder();
+ executeQuerySettings = ServerStreamingCallSettings.newBuilder();
unaryMethodSettingsBuilders =
ImmutableList.>of(
@@ -396,6 +410,7 @@ protected Builder(BigtableStubSettings settings) {
generateInitialChangeStreamPartitionsSettings =
settings.generateInitialChangeStreamPartitionsSettings.toBuilder();
readChangeStreamSettings = settings.readChangeStreamSettings.toBuilder();
+ executeQuerySettings = settings.executeQuerySettings.toBuilder();
unaryMethodSettingsBuilders =
ImmutableList.>of(
@@ -463,6 +478,11 @@ private static Builder initDefaults(Builder builder) {
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_6_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_6_params"));
+ builder
+ .executeQuerySettings()
+ .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes"))
+ .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params"));
+
return builder;
}
@@ -538,6 +558,12 @@ public UnaryCallSettings.Builder mutateRowS
return readChangeStreamSettings;
}
+ /** Returns the builder for the settings used for calls to executeQuery. */
+ public ServerStreamingCallSettings.Builder<ExecuteQueryRequest, ExecuteQueryResponse>
+ executeQuerySettings() {
+ return executeQuerySettings;
+ }
+
@Override
public BigtableStubSettings build() throws IOException {
return new BigtableStubSettings(this);
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
index 57d9748cca..d0022a1a46 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
@@ -44,6 +44,7 @@
import com.google.api.gax.rpc.RequestParamsExtractor;
import com.google.api.gax.rpc.ServerStreamingCallSettings;
import com.google.api.gax.rpc.ServerStreamingCallable;
+import com.google.api.gax.rpc.StatusCode.Code;
import com.google.api.gax.rpc.UnaryCallSettings;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.api.gax.tracing.ApiTracerFactory;
@@ -56,6 +57,8 @@
import com.google.bigtable.v2.BigtableGrpc;
import com.google.bigtable.v2.CheckAndMutateRowRequest;
import com.google.bigtable.v2.CheckAndMutateRowResponse;
+import com.google.bigtable.v2.ExecuteQueryRequest;
+import com.google.bigtable.v2.ExecuteQueryResponse;
import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest;
import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse;
import com.google.bigtable.v2.MutateRowRequest;
@@ -77,6 +80,7 @@
import com.google.cloud.bigtable.data.v2.internal.JwtCredentialsWithAudience;
import com.google.cloud.bigtable.data.v2.internal.NameUtil;
import com.google.cloud.bigtable.data.v2.internal.RequestContext;
+import com.google.cloud.bigtable.data.v2.internal.SqlRow;
import com.google.cloud.bigtable.data.v2.models.BulkMutation;
import com.google.cloud.bigtable.data.v2.models.ChangeStreamMutation;
import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord;
@@ -95,6 +99,7 @@
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
import com.google.cloud.bigtable.data.v2.models.SampleRowKeysRequest;
import com.google.cloud.bigtable.data.v2.models.TargetId;
+import com.google.cloud.bigtable.data.v2.models.sql.Statement;
import com.google.cloud.bigtable.data.v2.stub.changestream.ChangeStreamRecordMergingCallable;
import com.google.cloud.bigtable.data.v2.stub.changestream.GenerateInitialChangeStreamPartitionsUserCallable;
import com.google.cloud.bigtable.data.v2.stub.changestream.ReadChangeStreamResumptionStrategy;
@@ -125,6 +130,10 @@
import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsRetryCompletedCallable;
import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsUserCallable;
import com.google.cloud.bigtable.data.v2.stub.readrows.RowMergingCallable;
+import com.google.cloud.bigtable.data.v2.stub.sql.ExecuteQueryCallContext;
+import com.google.cloud.bigtable.data.v2.stub.sql.ExecuteQueryCallable;
+import com.google.cloud.bigtable.data.v2.stub.sql.MetadataResolvingCallable;
+import com.google.cloud.bigtable.data.v2.stub.sql.SqlRowMergingCallable;
import com.google.cloud.bigtable.gaxx.retrying.ApiResultRetryAlgorithm;
import com.google.cloud.bigtable.gaxx.retrying.RetryInfoRetryAlgorithm;
import com.google.common.annotations.VisibleForTesting;
@@ -148,6 +157,7 @@
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -200,6 +210,8 @@ public class EnhancedBigtableStub implements AutoCloseable {
private final ServerStreamingCallable
readChangeStreamCallable;
+ private final ExecuteQueryCallable executeQueryCallable;
+
public static EnhancedBigtableStub create(EnhancedBigtableStubSettings settings)
throws IOException {
ClientContext clientContext = createClientContext(settings);
@@ -466,6 +478,7 @@ public EnhancedBigtableStub(
readChangeStreamCallable =
createReadChangeStreamCallable(new DefaultChangeStreamRecordAdapter());
pingAndWarmCallable = createPingAndWarmCallable();
+ executeQueryCallable = createExecuteQueryCallable();
}
//
@@ -1284,6 +1297,74 @@ public Map extract(
return traced.withDefaultCallContext(clientContext.getDefaultCallContext());
}
+ /**
+ * Creates a callable chain to handle streaming ExecuteQuery RPCs. The chain will:
+ *
+ * <ul>
+ *   <li>Convert a {@link Statement} into a {@link ExecuteQueryCallContext}, which passes the
+ *       {@link Statement} & a future for the {@link
+ *       com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata} up the call chain.
+ *   <li>Upon receiving the response stream, it will set the metadata future and translate the
+ *       {@link com.google.bigtable.v2.PartialResultSet}s into {@link SqlRow}s
+ *   <li>Add tracing & metrics.
+ *   <li>Wrap the metadata future & row stream into a {@link
+ *       com.google.cloud.bigtable.data.v2.stub.sql.SqlServerStream}
+ * </ul>
+ */
+ @InternalApi("For internal use only")
+ public ExecuteQueryCallable createExecuteQueryCallable() {
+ // TODO support resumption
+ // TODO update codes once resumption is implemented
+ Set<Code> retryableCodes = Collections.emptySet();
+ ServerStreamingCallable<ExecuteQueryRequest, ExecuteQueryResponse> base =
+ GrpcRawCallableFactory.createServerStreamingCallable(
+ GrpcCallSettings.newBuilder()
+ .setMethodDescriptor(BigtableGrpc.getExecuteQueryMethod())
+ .setParamsExtractor(
+ new RequestParamsExtractor<ExecuteQueryRequest>() {
+ @Override
+ public Map<String, String> extract(ExecuteQueryRequest executeQueryRequest) {
+ return ImmutableMap.of(
+ "name", executeQueryRequest.getInstanceName(),
+ "app_profile_id", executeQueryRequest.getAppProfileId());
+ }
+ })
+ .build(),
+ retryableCodes);
+
+ ServerStreamingCallable<ExecuteQueryRequest, ExecuteQueryResponse> withStatsHeaders =
+ new StatsHeadersServerStreamingCallable<>(base);
+
+ ServerStreamingCallSettings<ExecuteQueryRequest, ExecuteQueryResponse> innerSettings =
+ ServerStreamingCallSettings.newBuilder()
+ // TODO resumption strategy and retry settings
+ .setIdleTimeout(settings.executeQuerySettings().getIdleTimeout())
+ .setWaitTimeout(settings.executeQuerySettings().getWaitTimeout())
+ .build();
+
+ // Watchdog needs to stay above the metadata observer so that watchdog errors
+ // are passed through to the metadata future.
+ ServerStreamingCallable<ExecuteQueryRequest, ExecuteQueryResponse> watched =
+ Callables.watched(withStatsHeaders, innerSettings, clientContext);
+
+ ServerStreamingCallable<ExecuteQueryRequest, ExecuteQueryResponse> withMetadataObserver =
+ new MetadataResolvingCallable(watched);
+
+ ServerStreamingCallable<ExecuteQueryRequest, SqlRow> merging =
+ new SqlRowMergingCallable(withMetadataObserver);
+
+ ServerStreamingCallable<ExecuteQueryRequest, SqlRow> withBigtableTracer =
+ new BigtableTracerStreamingCallable<>(merging);
+
+ SpanName span = getSpanName("ExecuteQuery");
+ ServerStreamingCallable<ExecuteQueryRequest, SqlRow> traced =
+ new TracedServerStreamingCallable<>(
+ withBigtableTracer, clientContext.getTracerFactory(), span);
+
+ return new ExecuteQueryCallable(
+ traced.withDefaultCallContext(clientContext.getDefaultCallContext()), requestContext);
+ }
+
/**
* Wraps a callable chain in a user presentable callable that will inject the default call context
* and trace the call.
@@ -1416,6 +1497,11 @@ public UnaryCallable readModifyWriteRowCallable() {
return readChangeStreamCallable;
}
+ /** Returns an {@link com.google.cloud.bigtable.data.v2.stub.sql.ExecuteQueryCallable} */
+ public ExecuteQueryCallable executeQueryCallable() {
+ return executeQueryCallable;
+ }
+
UnaryCallable pingAndWarmCallable() {
return pingAndWarmCallable;
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
index 5a9e03cf10..4415894132 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
@@ -35,6 +35,7 @@
import com.google.bigtable.v2.FeatureFlags;
import com.google.bigtable.v2.PingAndWarmRequest;
import com.google.cloud.bigtable.Version;
+import com.google.cloud.bigtable.data.v2.internal.SqlRow;
import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord;
import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation;
import com.google.cloud.bigtable.data.v2.models.KeyOffset;
@@ -44,6 +45,7 @@
import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.models.sql.Statement;
import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor;
@@ -57,6 +59,7 @@
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -187,6 +190,18 @@ public class EnhancedBigtableStubSettings extends StubSettings EXECUTE_QUERY_RETRY_CODES = Collections.emptySet();
+
+ // We still setup retry settings in order to set default deadlines
+ private static final RetrySettings EXECUTE_QUERY_RETRY_SETTINGS =
+ RetrySettings.newBuilder()
+ .setMaxAttempts(1)
+ // Set a conservative deadline to start for preview. We'll increase this in the future
+ .setInitialRpcTimeout(Duration.ofSeconds(30))
+ .setMaxRpcTimeout(Duration.ofSeconds(30))
+ .build();
/**
* Scopes that are equivalent to JWT's audience.
*
@@ -230,6 +245,7 @@ public class EnhancedBigtableStubSettings extends StubSettings
readChangeStreamSettings;
private final UnaryCallSettings pingAndWarmSettings;
+ private final ServerStreamingCallSettings<Statement, SqlRow> executeQuerySettings;
private final FeatureFlags featureFlags;
@@ -276,6 +292,7 @@ private EnhancedBigtableStubSettings(Builder builder) {
builder.generateInitialChangeStreamPartitionsSettings.build();
readChangeStreamSettings = builder.readChangeStreamSettings.build();
pingAndWarmSettings = builder.pingAndWarmSettings.build();
+ executeQuerySettings = builder.executeQuerySettings.build();
featureFlags = builder.featureFlags.build();
}
@@ -614,6 +631,10 @@ public UnaryCallSettings readModifyWriteRowSettings() {
return readChangeStreamSettings;
}
+ public ServerStreamingCallSettings<Statement, SqlRow> executeQuerySettings() {
+ return executeQuerySettings;
+ }
+
/**
* Returns the object with the settings used for calls to PingAndWarm.
*
@@ -654,6 +675,7 @@ public static class Builder extends StubSettings.Builder
readChangeStreamSettings;
private final UnaryCallSettings.Builder pingAndWarmSettings;
+ private final ServerStreamingCallSettings.Builder<Statement, SqlRow> executeQuerySettings;
private FeatureFlags.Builder featureFlags;
@@ -782,6 +804,14 @@ private Builder() {
.setTotalTimeout(PRIME_REQUEST_TIMEOUT)
.build());
+ executeQuerySettings = ServerStreamingCallSettings.newBuilder();
+ executeQuerySettings
+ .setRetryableCodes(EXECUTE_QUERY_RETRY_CODES)
+ // This is used to set deadlines. We do not support retries yet.
+ .setRetrySettings(EXECUTE_QUERY_RETRY_SETTINGS)
+ .setIdleTimeout(Duration.ofMinutes(5))
+ .setWaitTimeout(Duration.ofMinutes(5));
+
featureFlags =
FeatureFlags.newBuilder().setReverseScans(true).setLastScannedRowResponses(true);
}
@@ -811,6 +841,7 @@ private Builder(EnhancedBigtableStubSettings settings) {
settings.generateInitialChangeStreamPartitionsSettings.toBuilder();
readChangeStreamSettings = settings.readChangeStreamSettings.toBuilder();
pingAndWarmSettings = settings.pingAndWarmSettings.toBuilder();
+ executeQuerySettings = settings.executeQuerySettings().toBuilder();
featureFlags = settings.featureFlags.toBuilder();
}
//
@@ -1066,6 +1097,17 @@ public UnaryCallSettings.Builder pingAndWarmSettings()
return pingAndWarmSettings;
}
+ /**
+ * Returns the builder for the settings used for calls to ExecuteQuery
+ *
+ * Note that this will currently ignore any retry settings other than deadlines. ExecuteQuery
+ * requests will not be retried currently.
+ */
+ @BetaApi
+ public ServerStreamingCallSettings.Builder executeQuerySettings() {
+ return executeQuerySettings;
+ }
+
@SuppressWarnings("unchecked")
public EnhancedBigtableStubSettings build() {
Preconditions.checkState(projectId != null, "Project id must be set");
@@ -1136,6 +1178,7 @@ public String toString() {
generateInitialChangeStreamPartitionsSettings)
.add("readChangeStreamSettings", readChangeStreamSettings)
.add("pingAndWarmSettings", pingAndWarmSettings)
+ .add("executeQuerySettings", executeQuerySettings)
.add("metricsProvider", metricsProvider)
.add("parent", super.toString())
.toString();
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java
index 60f611e636..d5a81334b9 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java
@@ -28,6 +28,8 @@
import com.google.api.pathtemplate.PathTemplate;
import com.google.bigtable.v2.CheckAndMutateRowRequest;
import com.google.bigtable.v2.CheckAndMutateRowResponse;
+import com.google.bigtable.v2.ExecuteQueryRequest;
+import com.google.bigtable.v2.ExecuteQueryResponse;
import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest;
import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse;
import com.google.bigtable.v2.MutateRowRequest;
@@ -156,6 +158,16 @@ public class GrpcBigtableStub extends BigtableStub {
ProtoUtils.marshaller(ReadChangeStreamResponse.getDefaultInstance()))
.build();
+ private static final MethodDescriptor
+ executeQueryMethodDescriptor =
+ MethodDescriptor.newBuilder()
+ .setType(MethodDescriptor.MethodType.SERVER_STREAMING)
+ .setFullMethodName("google.bigtable.v2.Bigtable/ExecuteQuery")
+ .setRequestMarshaller(ProtoUtils.marshaller(ExecuteQueryRequest.getDefaultInstance()))
+ .setResponseMarshaller(
+ ProtoUtils.marshaller(ExecuteQueryResponse.getDefaultInstance()))
+ .build();
+
private final ServerStreamingCallable readRowsCallable;
private final ServerStreamingCallable
sampleRowKeysCallable;
@@ -172,6 +184,8 @@ public class GrpcBigtableStub extends BigtableStub {
generateInitialChangeStreamPartitionsCallable;
private final ServerStreamingCallable
readChangeStreamCallable;
+ private final ServerStreamingCallable
+ executeQueryCallable;
private final BackgroundResource backgroundResources;
private final GrpcOperationsStub operationsStub;
@@ -223,6 +237,10 @@ public class GrpcBigtableStub extends BigtableStub {
private static final PathTemplate READ_MODIFY_WRITE_ROW_2_PATH_TEMPLATE =
PathTemplate.create(
"{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}");
+ private static final PathTemplate EXECUTE_QUERY_0_PATH_TEMPLATE =
+ PathTemplate.create("{name=projects/*/instances/*}");
+ private static final PathTemplate EXECUTE_QUERY_1_PATH_TEMPLATE =
+ PathTemplate.create("{app_profile_id=**}");
public static final GrpcBigtableStub create(BigtableStubSettings settings) throws IOException {
return new GrpcBigtableStub(settings, ClientContext.create(settings));
@@ -407,6 +425,18 @@ protected GrpcBigtableStub(
return builder.build();
})
.build();
+ GrpcCallSettings executeQueryTransportSettings =
+ GrpcCallSettings.newBuilder()
+ .setMethodDescriptor(executeQueryMethodDescriptor)
+ .setParamsExtractor(
+ request -> {
+ RequestParamsBuilder builder = RequestParamsBuilder.create();
+ builder.add(request.getInstanceName(), "name", EXECUTE_QUERY_0_PATH_TEMPLATE);
+ builder.add(
+ request.getAppProfileId(), "app_profile_id", EXECUTE_QUERY_1_PATH_TEMPLATE);
+ return builder.build();
+ })
+ .build();
this.readRowsCallable =
callableFactory.createServerStreamingCallable(
@@ -441,6 +471,9 @@ protected GrpcBigtableStub(
this.readChangeStreamCallable =
callableFactory.createServerStreamingCallable(
readChangeStreamTransportSettings, settings.readChangeStreamSettings(), clientContext);
+ this.executeQueryCallable =
+ callableFactory.createServerStreamingCallable(
+ executeQueryTransportSettings, settings.executeQuerySettings(), clientContext);
this.backgroundResources =
new BackgroundResourceAggregation(clientContext.getBackgroundResources());
@@ -502,6 +535,11 @@ public UnaryCallable pingAndWarmCallabl
return readChangeStreamCallable;
}
+ @Override
+ public ServerStreamingCallable executeQueryCallable() {
+ return executeQueryCallable;
+ }
+
@Override
public final void close() {
try {
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/ExecuteQueryCallContext.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/ExecuteQueryCallContext.java
new file mode 100644
index 0000000000..8d0e6b81d0
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/ExecuteQueryCallContext.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.sql;
+
+import com.google.api.core.InternalApi;
+import com.google.api.core.SettableApiFuture;
+import com.google.auto.value.AutoValue;
+import com.google.bigtable.v2.ExecuteQueryRequest;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+
+/**
+ * POJO used to provide a future to the ExecuteQuery callable chain in order to return metadata to
+ * users outside of the stream of rows.
+ *
+ * This should only be constructed by {@link ExecuteQueryCallable} not directly by users.
+ *
+ * <p>This is considered an internal implementation detail and should not be used by applications.
+ */
+@InternalApi("For internal use only")
+@AutoValue
+public abstract class ExecuteQueryCallContext {
+
+ /** Creates a call context pairing the request with the future used to surface metadata. */
+ @InternalApi("For internal use only")
+ public static ExecuteQueryCallContext create(
+ ExecuteQueryRequest request, SettableApiFuture metadataFuture) {
+ return new AutoValue_ExecuteQueryCallContext(request, metadataFuture);
+ }
+
+ /** The ExecuteQuery request that this call will send. */
+ abstract ExecuteQueryRequest request();
+
+ /** Future that the callable chain completes with the stream's {@link ResultSetMetadata}. */
+ abstract SettableApiFuture resultSetMetadataFuture();
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/ExecuteQueryCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/ExecuteQueryCallable.java
new file mode 100644
index 0000000000..9563b6c6f9
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/ExecuteQueryCallable.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.sql;
+
+import com.google.api.core.InternalApi;
+import com.google.api.core.SettableApiFuture;
+import com.google.api.gax.rpc.ApiCallContext;
+import com.google.api.gax.rpc.ResponseObserver;
+import com.google.api.gax.rpc.ServerStream;
+import com.google.api.gax.rpc.ServerStreamingCallable;
+import com.google.bigtable.v2.ExecuteQueryRequest;
+import com.google.cloud.bigtable.data.v2.internal.RequestContext;
+import com.google.cloud.bigtable.data.v2.internal.SqlRow;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+import com.google.cloud.bigtable.data.v2.models.sql.Statement;
+
+/**
+ * Callable that creates {@link SqlServerStream}s from {@link ExecuteQueryRequest}s.
+ *
+ * This handles setting up the future that is used to allow users to access metadata.
+ *
+ * <p>This class is considered an internal implementation detail and not meant to be used by
+ * applications.
+ */
+@InternalApi
+public class ExecuteQueryCallable extends ServerStreamingCallable {
+
+ // Transport-level callable that actually issues the RPC chain.
+ private final ServerStreamingCallable inner;
+ // Used to stamp requests with project/instance/app-profile information.
+ private final RequestContext requestContext;
+
+ public ExecuteQueryCallable(
+ ServerStreamingCallable inner,
+ RequestContext requestContext) {
+ this.inner = inner;
+ this.requestContext = requestContext;
+ }
+
+ /**
+ * Converts the statement into an ExecuteQuery request, issues the call, and returns a {@link
+ * SqlServerStream} exposing both the stream of rows and a future for the metadata.
+ */
+ public SqlServerStream call(Statement statement) {
+ ExecuteQueryRequest request = statement.toProto(requestContext);
+ SettableApiFuture metadataFuture = SettableApiFuture.create();
+ ServerStream rowStream =
+ this.call(ExecuteQueryCallContext.create(request, metadataFuture));
+ return SqlServerStreamImpl.create(metadataFuture, rowStream);
+ }
+
+ @Override
+ public void call(
+ ExecuteQueryCallContext callContext,
+ ResponseObserver responseObserver,
+ ApiCallContext apiCallContext) {
+ // Delegate to the inner callable with the call context untouched.
+ inner.call(callContext, responseObserver, apiCallContext);
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/MetadataResolvingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/MetadataResolvingCallable.java
new file mode 100644
index 0000000000..6b2f2b171f
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/MetadataResolvingCallable.java
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.sql;
+
+import com.google.api.core.InternalApi;
+import com.google.api.core.SettableApiFuture;
+import com.google.api.gax.rpc.ApiCallContext;
+import com.google.api.gax.rpc.ResponseObserver;
+import com.google.api.gax.rpc.ServerStreamingCallable;
+import com.google.api.gax.rpc.StreamController;
+import com.google.bigtable.v2.ExecuteQueryRequest;
+import com.google.bigtable.v2.ExecuteQueryResponse;
+import com.google.cloud.bigtable.data.v2.internal.ProtoResultSetMetadata;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+import com.google.cloud.bigtable.data.v2.stub.SafeResponseObserver;
+
+/**
+ * Callable that allows passing of {@link ResultSetMetadata} back to users through the {@link
+ * ExecuteQueryCallContext}.
+ *
+ * This is considered an internal implementation detail and should not be used by applications.
+ */
+@InternalApi("For internal use only")
+public class MetadataResolvingCallable
+ extends ServerStreamingCallable {
+ private final ServerStreamingCallable inner;
+
+ public MetadataResolvingCallable(
+ ServerStreamingCallable inner) {
+ this.inner = inner;
+ }
+
+ @Override
+ public void call(
+ ExecuteQueryCallContext callContext,
+ ResponseObserver responseObserver,
+ ApiCallContext apiCallContext) {
+ // Unwrap the context: send the raw request downstream and watch responses for metadata.
+ MetadataObserver observer =
+ new MetadataObserver(responseObserver, callContext.resultSetMetadataFuture());
+ inner.call(callContext.request(), observer, apiCallContext);
+ }
+
+ /** Observer that resolves the metadata future from the first response of the stream. */
+ static final class MetadataObserver extends SafeResponseObserver {
+
+ private final SettableApiFuture metadataFuture;
+ private final ResponseObserver outerObserver;
+ // This doesn't need to be synchronized because this is called above the reframer
+ // so onResponse will be called sequentially
+ private boolean isFirstResponse;
+
+ MetadataObserver(
+ ResponseObserver outerObserver,
+ SettableApiFuture metadataFuture) {
+ super(outerObserver);
+ this.outerObserver = outerObserver;
+ this.metadataFuture = metadataFuture;
+ this.isFirstResponse = true;
+ }
+
+ @Override
+ protected void onStartImpl(StreamController streamController) {
+ outerObserver.onStart(streamController);
+ }
+
+ @Override
+ protected void onResponseImpl(ExecuteQueryResponse response) {
+ // The server contract requires the first response to carry metadata; anything else is
+ // a broken stream, so fail the future and the stream together.
+ if (isFirstResponse && !response.hasMetadata()) {
+ IllegalStateException e =
+ new IllegalStateException("First response must always contain metadata");
+ metadataFuture.setException(e);
+ throw e;
+ }
+ isFirstResponse = false;
+ if (response.hasMetadata()) {
+ try {
+ ResultSetMetadata md = ProtoResultSetMetadata.fromProto(response.getMetadata());
+ metadataFuture.set(md);
+ } catch (Throwable t) {
+ // Metadata parsing failures must fail the future before propagating up the stream.
+ metadataFuture.setException(t);
+ throw t;
+ }
+ }
+ outerObserver.onResponse(response);
+ }
+
+ @Override
+ protected void onErrorImpl(Throwable throwable) {
+ // When we support retries this will have to move after the retrying callable in a separate
+ // observer.
+ metadataFuture.setException(throwable);
+ outerObserver.onError(throwable);
+ }
+
+ @Override
+ protected void onCompleteImpl() {
+ // A stream that completes without ever delivering metadata must not leave the future
+ // pending forever.
+ if (!metadataFuture.isDone()) {
+ IllegalStateException missingMetadataException =
+ new IllegalStateException("Unexpected Stream complete without receiving metadata");
+ metadataFuture.setException(missingMetadataException);
+ throw missingMetadataException;
+ }
+ outerObserver.onComplete();
+ }
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/ProtoRowsMergingStateMachine.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/ProtoRowsMergingStateMachine.java
new file mode 100644
index 0000000000..deefda4cad
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/ProtoRowsMergingStateMachine.java
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.sql;
+
+import com.google.api.core.InternalApi;
+import com.google.bigtable.v2.PartialResultSet;
+import com.google.bigtable.v2.ProtoRows;
+import com.google.bigtable.v2.Value;
+import com.google.cloud.bigtable.data.v2.internal.ProtoSqlRow;
+import com.google.cloud.bigtable.data.v2.internal.SqlRow;
+import com.google.cloud.bigtable.data.v2.models.sql.ColumnMetadata;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+import com.google.cloud.bigtable.data.v2.models.sql.SqlType;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Queue;
+
+/**
+ * Used to transform a stream of {@link com.google.bigtable.v2.ProtoRowsBatch} bytes chunks into
+ * {@link ProtoSqlRow}s for the given schema. Each SqlRow represents a logical row for a sql
+ * response.
+ *
+ * <p>The intended usage of this class is:
+ *
+ * <ol>
+ *   <li>Add results with {@link #addPartialResultSet(PartialResultSet)} until {@link
+ *       #hasCompleteBatch()} is true
+ *   <li>Call {@link #populateQueue(Queue)} to materialize results from the complete batch.
+ *   <li>Repeat until all {@link PartialResultSet}s have been processed
+ *   <li>Ensure that there is no incomplete data using {@link #isBatchInProgress()}
+ * </ol>
+ *
+ * <p>Package-private for internal use. This class is not thread safe.
+ */
+@InternalApi
+final class ProtoRowsMergingStateMachine {
+  enum State {
+    /** Waiting for the first chunk of bytes for a new batch */
+    AWAITING_NEW_BATCH,
+    /** Waiting for the next chunk of bytes, to combine with the bytes currently being buffered. */
+    AWAITING_PARTIAL_BATCH,
+    /** Buffering a complete batch of rows, waiting for populateQueue to be called for the batch */
+    AWAITING_BATCH_CONSUME,
+  }
+
+  private final ResultSetMetadata metadata;
+  private State state;
+  // Accumulates ProtoRowsBatch data until a resume token marks the batch complete.
+  private ByteString batchBuffer;
+  private ProtoRows completeBatch;
+
+  ProtoRowsMergingStateMachine(ResultSetMetadata metadata) {
+    this.metadata = metadata;
+    state = State.AWAITING_NEW_BATCH;
+    batchBuffer = ByteString.empty();
+  }
+
+  /**
+   * Adds the bytes from the given PartialResultSet to the current buffer. If a resume token is
+   * present, attempts to parse the bytes to the underlying protobuf row format
+   */
+  void addPartialResultSet(PartialResultSet results) {
+    Preconditions.checkState(
+        state != State.AWAITING_BATCH_CONSUME,
+        "Attempting to add partial result set to state machine in state AWAITING_BATCH_CONSUME");
+    // ByteString has an efficient concat which generally involves no copying
+    batchBuffer = batchBuffer.concat(results.getProtoRowsBatch().getBatchData());
+    state = State.AWAITING_PARTIAL_BATCH;
+    if (results.getResumeToken().isEmpty()) {
+      return;
+    }
+    // A resume token means the batch is complete and safe to yield
+    // We can receive resume tokens with no new data. In this case we yield an empty batch.
+    if (batchBuffer.isEmpty()) {
+      completeBatch = ProtoRows.getDefaultInstance();
+    } else {
+      try {
+        completeBatch = ProtoRows.parseFrom(batchBuffer);
+      } catch (InvalidProtocolBufferException e) {
+        throw new InternalError("Unexpected exception parsing response protobuf", e);
+      }
+    }
+    // Empty buffers can benefit from resetting because ByteString.concat builds a rope
+    batchBuffer = ByteString.empty();
+    state = State.AWAITING_BATCH_CONSUME;
+  }
+
+  /** Returns true if there is a complete batch buffered, false otherwise */
+  boolean hasCompleteBatch() {
+    return state == State.AWAITING_BATCH_CONSUME;
+  }
+
+  /** Returns true if there is a partial or complete batch buffered, false otherwise */
+  boolean isBatchInProgress() {
+    return hasCompleteBatch() || state == State.AWAITING_PARTIAL_BATCH;
+  }
+
+  /**
+   * Populates the given queue with the complete batch of rows
+   *
+   * @throws IllegalStateException if there is not a complete batch
+   */
+  void populateQueue(Queue<SqlRow> queue) {
+    Preconditions.checkState(
+        state == State.AWAITING_BATCH_CONSUME,
+        "Attempting to populate Queue from state machine without completed batch");
+    Iterator<Value> valuesIterator = completeBatch.getValuesList().iterator();
+    while (valuesIterator.hasNext()) {
+      // Values arrive as a flat list; regroup them into rows of metadata.getColumns() width.
+      ImmutableList.Builder<Value> rowDataBuilder = ImmutableList.builder();
+      for (ColumnMetadata c : metadata.getColumns()) {
+        Preconditions.checkState(
+            valuesIterator.hasNext(), "Incomplete row received with first missing column: %s", c);
+        Value v = valuesIterator.next();
+        validateValueAndType(c.type(), v);
+        rowDataBuilder.add(v);
+      }
+      queue.add(ProtoSqlRow.create(metadata, rowDataBuilder.build()));
+    }
+    // reset the batch to be empty
+    completeBatch = ProtoRows.getDefaultInstance();
+    state = State.AWAITING_NEW_BATCH;
+  }
+
+  /** Validates that the value's kind matches the column type, recursing into complex types. */
+  @InternalApi("VisibleForTestingOnly")
+  static void validateValueAndType(SqlType<?> type, Value value) {
+    // Null is represented as a value with none of the kind fields set
+    if (value.getKindCase() == Value.KindCase.KIND_NOT_SET) {
+      return;
+    }
+    switch (type.getCode()) {
+        // Primitive types
+      case STRING:
+        checkExpectedKind(value, Value.KindCase.STRING_VALUE, type);
+        break;
+      case BYTES:
+        checkExpectedKind(value, Value.KindCase.BYTES_VALUE, type);
+        break;
+      case INT64:
+        checkExpectedKind(value, Value.KindCase.INT_VALUE, type);
+        break;
+      case FLOAT64:
+      case FLOAT32:
+        checkExpectedKind(value, Value.KindCase.FLOAT_VALUE, type);
+        break;
+      case BOOL:
+        checkExpectedKind(value, Value.KindCase.BOOL_VALUE, type);
+        break;
+      case TIMESTAMP:
+        checkExpectedKind(value, Value.KindCase.TIMESTAMP_VALUE, type);
+        break;
+      case DATE:
+        checkExpectedKind(value, Value.KindCase.DATE_VALUE, type);
+        break;
+        // Complex types
+      case ARRAY:
+        checkExpectedKind(value, Value.KindCase.ARRAY_VALUE, type);
+        SqlType.Array<?> arrayType = (SqlType.Array<?>) type;
+        SqlType<?> elemType = arrayType.getElementType();
+        for (Value element : value.getArrayValue().getValuesList()) {
+          validateValueAndType(elemType, element);
+        }
+        break;
+      case STRUCT:
+        checkExpectedKind(value, Value.KindCase.ARRAY_VALUE, type);
+        List<Value> fieldValues = value.getArrayValue().getValuesList();
+        SqlType.Struct structType = (SqlType.Struct) type;
+        for (int i = 0; i < fieldValues.size(); i++) {
+          validateValueAndType(structType.getType(i), fieldValues.get(i));
+        }
+        break;
+      case MAP:
+        checkExpectedKind(value, Value.KindCase.ARRAY_VALUE, type);
+        SqlType.Map<?, ?> mapType = (SqlType.Map<?, ?>) type;
+        for (Value mapElement : value.getArrayValue().getValuesList()) {
+          // Each map entry is encoded as an array value of [key, value].
+          Preconditions.checkState(
+              mapElement.getArrayValue().getValuesCount() == 2,
+              "Map elements must have exactly 2 elements");
+          validateValueAndType(
+              mapType.getKeyType(), mapElement.getArrayValue().getValuesList().get(0));
+          validateValueAndType(
+              mapType.getValueType(), mapElement.getArrayValue().getValuesList().get(1));
+        }
+        break;
+      default:
+        // This should be caught already at ResultSetMetadata creation
+        throw new IllegalStateException("Unrecognized type: " + type);
+    }
+  }
+
+  private static void checkExpectedKind(Value value, Value.KindCase expectedKind, SqlType<?> type) {
+    Preconditions.checkState(
+        value.getKindCase() == expectedKind,
+        "Value kind must be %s for columns of type: %s",
+        expectedKind.name(),
+        type);
+  }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlRowMerger.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlRowMerger.java
new file mode 100644
index 0000000000..6178a1efcd
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlRowMerger.java
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.sql;
+
+import com.google.api.core.InternalApi;
+import com.google.bigtable.v2.ExecuteQueryResponse;
+import com.google.bigtable.v2.PartialResultSet;
+import com.google.cloud.bigtable.data.v2.internal.ProtoResultSetMetadata;
+import com.google.cloud.bigtable.data.v2.internal.SqlRow;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+import com.google.cloud.bigtable.gaxx.reframing.Reframer;
+import com.google.common.base.Preconditions;
+import java.util.ArrayDeque;
+import java.util.Queue;
+
+/**
+ * Used to transform a stream of ExecuteQueryResponse objects into rows. This class is not thread
+ * safe.
+ */
+@InternalApi
+public final class SqlRowMerger implements Reframer {
+
+ enum State {
+ /** Waiting for the initial metadata response that begins every query stream. */
+ AWAITING_METADATA,
+ /** Metadata received; subsequent responses are expected to carry results. */
+ PROCESSING_DATA,
+ }
+
+ // FIFO of fully materialized rows, ready to be returned by pop().
+ private final Queue queue;
+ // Created once metadata arrives; merges partial result sets into complete rows.
+ private ProtoRowsMergingStateMachine stateMachine;
+ private State currentState;
+
+ public SqlRowMerger() {
+ queue = new ArrayDeque<>();
+ currentState = State.AWAITING_METADATA;
+ }
+
+ /**
+ * Used to add responses to the SqlRowMerger as they are received.
+ *
+ * @param response the next response in the stream of query responses
+ */
+ // Suppress this because it won't be forced to be exhaustive once it is open-sourced, so we want a
+ // default.
+ @SuppressWarnings("UnnecessaryDefaultInEnumSwitch")
+ @Override
+ public void push(ExecuteQueryResponse response) {
+ switch (currentState) {
+ case AWAITING_METADATA:
+ Preconditions.checkState(
+ response.hasMetadata(),
+ "Expected metadata response, but received: %s",
+ response.getResponseCase().name());
+ ResultSetMetadata responseMetadata =
+ ProtoResultSetMetadata.fromProto(response.getMetadata());
+ stateMachine = new ProtoRowsMergingStateMachine(responseMetadata);
+ currentState = State.PROCESSING_DATA;
+ break;
+ case PROCESSING_DATA:
+ Preconditions.checkState(
+ response.hasResults(),
+ "Expected results response, but received: %s",
+ response.getResponseCase().name());
+ PartialResultSet results = response.getResults();
+ processProtoRows(results);
+ break;
+ default:
+ throw new IllegalStateException("Unknown State: " + currentState.name());
+ }
+ }
+
+ // Feeds partial results to the state machine and drains any completed batch into the queue.
+ private void processProtoRows(PartialResultSet results) {
+ stateMachine.addPartialResultSet(results);
+ if (stateMachine.hasCompleteBatch()) {
+ stateMachine.populateQueue(queue);
+ }
+ }
+
+ /**
+ * Check if the merger has consumable data
+ *
+ * @return true if there is a complete row, false otherwise.
+ */
+ @Override
+ public boolean hasFullFrame() {
+ return !queue.isEmpty();
+ }
+
+ /**
+ * Check if the merger contains partially complete (or complete) data.
+ *
+ * @return true if there is a partial (or complete) batch, false otherwise.
+ */
+ @Override
+ public boolean hasPartialFrame() {
+ switch (currentState) {
+ case AWAITING_METADATA:
+ // Nothing has been buffered before the first (metadata) response.
+ return false;
+ case PROCESSING_DATA:
+ return hasFullFrame() || stateMachine.isBatchInProgress();
+ default:
+ throw new IllegalStateException("Unknown State: " + currentState.name());
+ }
+ }
+
+ /** pops a completed row from the FIFO queue built from the given responses. */
+ @Override
+ public SqlRow pop() {
+ return Preconditions.checkNotNull(
+ queue.poll(), "SqlRowMerger.pop() called when there are no complete rows.");
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlRowMergingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlRowMergingCallable.java
new file mode 100644
index 0000000000..6d5d0ea4a4
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlRowMergingCallable.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.sql;
+
+import com.google.api.core.InternalApi;
+import com.google.api.gax.rpc.ApiCallContext;
+import com.google.api.gax.rpc.ResponseObserver;
+import com.google.api.gax.rpc.ServerStreamingCallable;
+import com.google.bigtable.v2.ExecuteQueryResponse;
+import com.google.cloud.bigtable.data.v2.internal.SqlRow;
+import com.google.cloud.bigtable.gaxx.reframing.ReframingResponseObserver;
+
+/**
+ * Callable that reframes the raw {@link ExecuteQueryResponse} stream into {@link SqlRow}s using a
+ * {@link SqlRowMerger}.
+ *
+ * <p>This is considered an internal implementation detail and should not be used by applications.
+ */
+@InternalApi
+public class SqlRowMergingCallable
+ extends ServerStreamingCallable {
+ private final ServerStreamingCallable inner;
+
+ public SqlRowMergingCallable(
+ ServerStreamingCallable inner) {
+ this.inner = inner;
+ }
+
+ @Override
+ public void call(
+ ExecuteQueryCallContext callContext,
+ ResponseObserver responseObserver,
+ ApiCallContext apiCallContext) {
+ // A fresh merger per call: the merger is stateful and not thread safe.
+ SqlRowMerger merger = new SqlRowMerger();
+ ReframingResponseObserver observer =
+ new ReframingResponseObserver<>(responseObserver, merger);
+ inner.call(callContext, observer, apiCallContext);
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlServerStream.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlServerStream.java
new file mode 100644
index 0000000000..1523e09235
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlServerStream.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.sql;
+
+import com.google.api.core.ApiFuture;
+import com.google.api.core.InternalApi;
+import com.google.api.gax.rpc.ServerStream;
+import com.google.cloud.bigtable.data.v2.internal.SqlRow;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+
+/**
+ * Wrapper for results of an ExecuteQuery call that includes both the stream of rows and a future to
+ * access {@link ResultSetMetadata}.
+ *
+ * This should only be created by {@link ExecuteQueryCallable}, never directly by users.
+ *
+ * <p>This is considered an internal implementation detail and should not be used by applications.
+ */
+@InternalApi("For internal use only")
+public interface SqlServerStream {
+ /** Future resolving to the query's {@link ResultSetMetadata} once it has been received. */
+ ApiFuture metadataFuture();
+
+ /** Stream of the query's result rows. */
+ ServerStream rows();
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlServerStreamImpl.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlServerStreamImpl.java
new file mode 100644
index 0000000000..caeb2e4788
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/sql/SqlServerStreamImpl.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.sql;
+
+import com.google.api.core.ApiFuture;
+import com.google.api.core.InternalApi;
+import com.google.api.gax.rpc.ServerStream;
+import com.google.auto.value.AutoValue;
+import com.google.cloud.bigtable.data.v2.internal.SqlRow;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+
+/**
+ * Implementation of {@link SqlServerStream} using AutoValue
+ *
+ * This is considered an internal implementation detail and should not be used by applications.
+ */
+@InternalApi("For internal use only")
+@AutoValue
+public abstract class SqlServerStreamImpl implements SqlServerStream {
+
+ @InternalApi("Visible for testing")
+ public static SqlServerStreamImpl create(
+ ApiFuture metadataApiFuture, ServerStream rowServerStream) {
+ return new AutoValue_SqlServerStreamImpl(metadataApiFuture, rowServerStream);
+ }
+
+ @Override
+ public abstract ApiFuture metadataFuture();
+
+ @Override
+ public abstract ServerStream rows();
+}
diff --git a/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.data.v2/reflect-config.json b/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.data.v2/reflect-config.json
index 2e7b1522bf..007c234eca 100644
--- a/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.data.v2/reflect-config.json
+++ b/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.data.v2/reflect-config.json
@@ -413,6 +413,24 @@
"allDeclaredClasses": true,
"allPublicClasses": true
},
+ {
+ "name": "com.google.bigtable.v2.ArrayValue",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ArrayValue$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
{
"name": "com.google.bigtable.v2.Cell",
"queryAllDeclaredConstructors": true,
@@ -423,7 +441,529 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Cell$Builder",
+ "name": "com.google.bigtable.v2.Cell$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.CheckAndMutateRowRequest",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.CheckAndMutateRowRequest$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.CheckAndMutateRowResponse",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.CheckAndMutateRowResponse$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Column",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Column$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ColumnMetadata",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ColumnMetadata$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ColumnRange",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ColumnRange$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ExecuteQueryRequest",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ExecuteQueryRequest$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ExecuteQueryResponse",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ExecuteQueryResponse$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Family",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Family$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.FeatureFlags",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.FeatureFlags$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.FullReadStatsView",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.FullReadStatsView$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowRequest",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowRequest$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowResponse",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowResponse$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowsRequest",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowsRequest$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowsRequest$Entry",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowsRequest$Entry$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowsResponse",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowsResponse$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowsResponse$Entry",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.MutateRowsResponse$Entry$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$AddToCell",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$AddToCell$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$DeleteFromColumn",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$DeleteFromColumn$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$DeleteFromFamily",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$DeleteFromFamily$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$DeleteFromRow",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$DeleteFromRow$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$SetCell",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Mutation$SetCell$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.PartialResultSet",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.PartialResultSet$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.PingAndWarmRequest",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.PingAndWarmRequest$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.PingAndWarmResponse",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.PingAndWarmResponse$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ProtoFormat",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ProtoFormat$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ProtoRows",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.ProtoRows$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -432,7 +972,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.CheckAndMutateRowRequest",
+ "name": "com.google.bigtable.v2.ProtoRowsBatch",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -441,7 +981,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.CheckAndMutateRowRequest$Builder",
+ "name": "com.google.bigtable.v2.ProtoRowsBatch$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -450,7 +990,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.CheckAndMutateRowResponse",
+ "name": "com.google.bigtable.v2.ProtoSchema",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -459,7 +999,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.CheckAndMutateRowResponse$Builder",
+ "name": "com.google.bigtable.v2.ProtoSchema$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -468,7 +1008,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Column",
+ "name": "com.google.bigtable.v2.RateLimitInfo",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -477,7 +1017,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Column$Builder",
+ "name": "com.google.bigtable.v2.RateLimitInfo$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -486,7 +1026,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ColumnRange",
+ "name": "com.google.bigtable.v2.ReadChangeStreamRequest",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -495,7 +1035,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ColumnRange$Builder",
+ "name": "com.google.bigtable.v2.ReadChangeStreamRequest$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -504,7 +1044,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Family",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -513,7 +1053,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Family$Builder",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -522,7 +1062,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.FeatureFlags",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$CloseStream",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -531,7 +1071,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.FeatureFlags$Builder",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$CloseStream$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -540,7 +1080,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.FullReadStatsView",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$DataChange",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -549,7 +1089,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.FullReadStatsView$Builder",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$DataChange$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -558,7 +1098,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$DataChange$Type",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -567,7 +1107,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest$Builder",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$Heartbeat",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -576,7 +1116,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$Heartbeat$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -585,7 +1125,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse$Builder",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$MutationChunk",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -594,7 +1134,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowRequest",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$MutationChunk$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -603,7 +1143,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowRequest$Builder",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$MutationChunk$ChunkInfo",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -612,7 +1152,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowResponse",
+ "name": "com.google.bigtable.v2.ReadChangeStreamResponse$MutationChunk$ChunkInfo$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -621,7 +1161,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowResponse$Builder",
+ "name": "com.google.bigtable.v2.ReadIterationStats",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -630,7 +1170,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowsRequest",
+ "name": "com.google.bigtable.v2.ReadIterationStats$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -639,7 +1179,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowsRequest$Builder",
+ "name": "com.google.bigtable.v2.ReadModifyWriteRowRequest",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -648,7 +1188,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowsRequest$Entry",
+ "name": "com.google.bigtable.v2.ReadModifyWriteRowRequest$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -657,7 +1197,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowsRequest$Entry$Builder",
+ "name": "com.google.bigtable.v2.ReadModifyWriteRowResponse",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -666,7 +1206,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowsResponse",
+ "name": "com.google.bigtable.v2.ReadModifyWriteRowResponse$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -675,7 +1215,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowsResponse$Builder",
+ "name": "com.google.bigtable.v2.ReadModifyWriteRule",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -684,7 +1224,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowsResponse$Entry",
+ "name": "com.google.bigtable.v2.ReadModifyWriteRule$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -693,7 +1233,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.MutateRowsResponse$Entry$Builder",
+ "name": "com.google.bigtable.v2.ReadRowsRequest",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -702,7 +1242,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation",
+ "name": "com.google.bigtable.v2.ReadRowsRequest$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -711,7 +1251,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$AddToCell",
+ "name": "com.google.bigtable.v2.ReadRowsRequest$RequestStatsView",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -720,7 +1260,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$AddToCell$Builder",
+ "name": "com.google.bigtable.v2.ReadRowsResponse",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -729,7 +1269,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$Builder",
+ "name": "com.google.bigtable.v2.ReadRowsResponse$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -738,7 +1278,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$DeleteFromColumn",
+ "name": "com.google.bigtable.v2.ReadRowsResponse$CellChunk",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -747,7 +1287,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$DeleteFromColumn$Builder",
+ "name": "com.google.bigtable.v2.ReadRowsResponse$CellChunk$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -756,7 +1296,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$DeleteFromFamily",
+ "name": "com.google.bigtable.v2.RequestLatencyStats",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -765,7 +1305,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$DeleteFromFamily$Builder",
+ "name": "com.google.bigtable.v2.RequestLatencyStats$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -774,7 +1314,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$DeleteFromRow",
+ "name": "com.google.bigtable.v2.RequestStats",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -783,7 +1323,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$DeleteFromRow$Builder",
+ "name": "com.google.bigtable.v2.RequestStats$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -792,7 +1332,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$SetCell",
+ "name": "com.google.bigtable.v2.ResponseParams",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -801,7 +1341,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Mutation$SetCell$Builder",
+ "name": "com.google.bigtable.v2.ResponseParams$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -810,7 +1350,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.PingAndWarmRequest",
+ "name": "com.google.bigtable.v2.ResultSetMetadata",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -819,7 +1359,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.PingAndWarmRequest$Builder",
+ "name": "com.google.bigtable.v2.ResultSetMetadata$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -828,7 +1368,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.PingAndWarmResponse",
+ "name": "com.google.bigtable.v2.Row",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -837,7 +1377,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.PingAndWarmResponse$Builder",
+ "name": "com.google.bigtable.v2.Row$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -846,7 +1386,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RateLimitInfo",
+ "name": "com.google.bigtable.v2.RowFilter",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -855,7 +1395,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RateLimitInfo$Builder",
+ "name": "com.google.bigtable.v2.RowFilter$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -864,7 +1404,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamRequest",
+ "name": "com.google.bigtable.v2.RowFilter$Chain",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -873,7 +1413,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamRequest$Builder",
+ "name": "com.google.bigtable.v2.RowFilter$Chain$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -882,7 +1422,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse",
+ "name": "com.google.bigtable.v2.RowFilter$Condition",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -891,7 +1431,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$Builder",
+ "name": "com.google.bigtable.v2.RowFilter$Condition$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -900,7 +1440,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$CloseStream",
+ "name": "com.google.bigtable.v2.RowFilter$Interleave",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -909,7 +1449,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$CloseStream$Builder",
+ "name": "com.google.bigtable.v2.RowFilter$Interleave$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -918,7 +1458,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$DataChange",
+ "name": "com.google.bigtable.v2.RowRange",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -927,7 +1467,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$DataChange$Builder",
+ "name": "com.google.bigtable.v2.RowRange$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -936,7 +1476,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$DataChange$Type",
+ "name": "com.google.bigtable.v2.RowSet",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -945,7 +1485,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$Heartbeat",
+ "name": "com.google.bigtable.v2.RowSet$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -954,7 +1494,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$Heartbeat$Builder",
+ "name": "com.google.bigtable.v2.SampleRowKeysRequest",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -963,7 +1503,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$MutationChunk",
+ "name": "com.google.bigtable.v2.SampleRowKeysRequest$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -972,7 +1512,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$MutationChunk$Builder",
+ "name": "com.google.bigtable.v2.SampleRowKeysResponse",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -981,7 +1521,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$MutationChunk$ChunkInfo",
+ "name": "com.google.bigtable.v2.SampleRowKeysResponse$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -990,7 +1530,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadChangeStreamResponse$MutationChunk$ChunkInfo$Builder",
+ "name": "com.google.bigtable.v2.StreamContinuationToken",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -999,7 +1539,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadIterationStats",
+ "name": "com.google.bigtable.v2.StreamContinuationToken$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1008,7 +1548,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadIterationStats$Builder",
+ "name": "com.google.bigtable.v2.StreamContinuationTokens",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1017,7 +1557,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadModifyWriteRowRequest",
+ "name": "com.google.bigtable.v2.StreamContinuationTokens$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1026,7 +1566,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadModifyWriteRowRequest$Builder",
+ "name": "com.google.bigtable.v2.StreamPartition",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1035,7 +1575,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadModifyWriteRowResponse",
+ "name": "com.google.bigtable.v2.StreamPartition$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1044,7 +1584,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadModifyWriteRowResponse$Builder",
+ "name": "com.google.bigtable.v2.TimestampRange",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1053,7 +1593,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadModifyWriteRule",
+ "name": "com.google.bigtable.v2.TimestampRange$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1062,7 +1602,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadModifyWriteRule$Builder",
+ "name": "com.google.bigtable.v2.Type",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1071,7 +1611,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadRowsRequest",
+ "name": "com.google.bigtable.v2.Type$Aggregate",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1080,7 +1620,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadRowsRequest$Builder",
+ "name": "com.google.bigtable.v2.Type$Aggregate$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1089,7 +1629,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadRowsRequest$RequestStatsView",
+ "name": "com.google.bigtable.v2.Type$Aggregate$HyperLogLogPlusPlusUniqueCount",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1098,7 +1638,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadRowsResponse",
+ "name": "com.google.bigtable.v2.Type$Aggregate$HyperLogLogPlusPlusUniqueCount$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1107,7 +1647,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadRowsResponse$Builder",
+ "name": "com.google.bigtable.v2.Type$Aggregate$Max",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1116,7 +1656,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadRowsResponse$CellChunk",
+ "name": "com.google.bigtable.v2.Type$Aggregate$Max$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1125,7 +1665,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ReadRowsResponse$CellChunk$Builder",
+ "name": "com.google.bigtable.v2.Type$Aggregate$Min",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1134,7 +1674,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RequestLatencyStats",
+ "name": "com.google.bigtable.v2.Type$Aggregate$Min$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1143,7 +1683,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RequestLatencyStats$Builder",
+ "name": "com.google.bigtable.v2.Type$Aggregate$Sum",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1152,7 +1692,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RequestStats",
+ "name": "com.google.bigtable.v2.Type$Aggregate$Sum$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1161,7 +1701,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RequestStats$Builder",
+ "name": "com.google.bigtable.v2.Type$Array",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1170,7 +1710,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ResponseParams",
+ "name": "com.google.bigtable.v2.Type$Array$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1179,7 +1719,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.ResponseParams$Builder",
+ "name": "com.google.bigtable.v2.Type$Bool",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1188,7 +1728,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Row",
+ "name": "com.google.bigtable.v2.Type$Bool$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1197,7 +1737,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.Row$Builder",
+ "name": "com.google.bigtable.v2.Type$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1206,7 +1746,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowFilter",
+ "name": "com.google.bigtable.v2.Type$Bytes",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1215,7 +1755,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowFilter$Builder",
+ "name": "com.google.bigtable.v2.Type$Bytes$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1224,7 +1764,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowFilter$Chain",
+ "name": "com.google.bigtable.v2.Type$Bytes$Encoding",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1233,7 +1773,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowFilter$Chain$Builder",
+ "name": "com.google.bigtable.v2.Type$Bytes$Encoding$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1242,7 +1782,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowFilter$Condition",
+ "name": "com.google.bigtable.v2.Type$Bytes$Encoding$Raw",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1251,7 +1791,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowFilter$Condition$Builder",
+ "name": "com.google.bigtable.v2.Type$Bytes$Encoding$Raw$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1260,7 +1800,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowFilter$Interleave",
+ "name": "com.google.bigtable.v2.Type$Date",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1269,7 +1809,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowFilter$Interleave$Builder",
+ "name": "com.google.bigtable.v2.Type$Date$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1278,7 +1818,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowRange",
+ "name": "com.google.bigtable.v2.Type$Float32",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1287,7 +1827,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowRange$Builder",
+ "name": "com.google.bigtable.v2.Type$Float32$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1296,7 +1836,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowSet",
+ "name": "com.google.bigtable.v2.Type$Float64",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1305,7 +1845,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.RowSet$Builder",
+ "name": "com.google.bigtable.v2.Type$Float64$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1314,7 +1854,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.SampleRowKeysRequest",
+ "name": "com.google.bigtable.v2.Type$Int64",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1323,7 +1863,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.SampleRowKeysRequest$Builder",
+ "name": "com.google.bigtable.v2.Type$Int64$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1332,7 +1872,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.SampleRowKeysResponse",
+ "name": "com.google.bigtable.v2.Type$Int64$Encoding",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1341,7 +1881,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.SampleRowKeysResponse$Builder",
+ "name": "com.google.bigtable.v2.Type$Int64$Encoding$BigEndianBytes",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1350,7 +1890,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.StreamContinuationToken",
+ "name": "com.google.bigtable.v2.Type$Int64$Encoding$BigEndianBytes$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1359,7 +1899,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.StreamContinuationToken$Builder",
+ "name": "com.google.bigtable.v2.Type$Int64$Encoding$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1368,7 +1908,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.StreamContinuationTokens",
+ "name": "com.google.bigtable.v2.Type$Map",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1377,7 +1917,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.StreamContinuationTokens$Builder",
+ "name": "com.google.bigtable.v2.Type$Map$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1386,7 +1926,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.StreamPartition",
+ "name": "com.google.bigtable.v2.Type$String",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1395,7 +1935,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.StreamPartition$Builder",
+ "name": "com.google.bigtable.v2.Type$String$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1404,7 +1944,7 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.TimestampRange",
+ "name": "com.google.bigtable.v2.Type$String$Encoding",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -1413,7 +1953,79 @@
"allPublicClasses": true
},
{
- "name": "com.google.bigtable.v2.TimestampRange$Builder",
+ "name": "com.google.bigtable.v2.Type$String$Encoding$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Type$String$Encoding$Utf8Bytes",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Type$String$Encoding$Utf8Bytes$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Type$Struct",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Type$Struct$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Type$Struct$Field",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Type$Struct$Field$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Type$Timestamp",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.v2.Type$Timestamp$Builder",
"queryAllDeclaredConstructors": true,
"queryAllPublicConstructors": true,
"queryAllDeclaredMethods": true,
@@ -2419,5 +3031,23 @@
"allPublicMethods": true,
"allDeclaredClasses": true,
"allPublicClasses": true
+ },
+ {
+ "name": "com.google.type.Date",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.type.Date$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
}
]
\ No newline at end of file
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/common/TypeTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/common/TypeTest.java
new file mode 100644
index 0000000000..aba13bfa12
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/common/TypeTest.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.common;
+
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.arrayType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.bytesType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.int64Type;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.mapType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.stringType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.structField;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.structType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.timestampType;
+import static com.google.common.truth.Truth.assertThat;
+import static org.junit.Assert.assertThrows;
+
+import com.google.cloud.bigtable.common.Type.SchemalessStruct;
+import com.google.cloud.bigtable.common.Type.StructWithSchema;
+import com.google.cloud.bigtable.data.v2.models.sql.SqlType;
+import com.google.cloud.bigtable.data.v2.models.sql.Struct;
+import com.google.common.testing.EqualsTester;
+import com.google.protobuf.ByteString;
+import java.util.List;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+public class TypeTest {
+
+ @Test
+ public void simpleTypes_TypeToString() {
+ assertThat(Type.String.create().toString()).isEqualTo("STRING");
+ assertThat(Type.Bytes.create().toString()).isEqualTo("BYTES");
+ assertThat(Type.Int64.create().toString()).isEqualTo("INT64");
+ assertThat(Type.Float64.create().toString()).isEqualTo("FLOAT64");
+ assertThat(Type.Float32.create().toString()).isEqualTo("FLOAT32");
+ assertThat(Type.Bool.create().toString()).isEqualTo("BOOL");
+ assertThat(Type.Timestamp.create().toString()).isEqualTo("TIMESTAMP");
+ assertThat(Type.Date.create().toString()).isEqualTo("DATE");
+ assertThat(Type.SchemalessStruct.create().toString()).isEqualTo("STRUCT");
+ }
+
+ @Test
+ public void simpleTypes_equals() {
+ assertThat(Type.String.create()).isEqualTo(Type.String.create());
+ assertThat(Type.Bytes.create()).isEqualTo(Type.Bytes.create());
+ assertThat(Type.Int64.create()).isEqualTo(Type.Int64.create());
+ assertThat(Type.Float32.create()).isEqualTo(Type.Float32.create());
+ assertThat(Type.Float64.create()).isEqualTo(Type.Float64.create());
+ assertThat(Type.Bool.create()).isEqualTo(Type.Bool.create());
+ assertThat(Type.Timestamp.create()).isEqualTo(Type.Timestamp.create());
+ assertThat(Type.Date.create()).isEqualTo(Type.Date.create());
+ assertThat(Type.SchemalessStruct.create()).isEqualTo(Type.SchemalessStruct.create());
+
+ assertThat(Type.String.create()).isNotEqualTo(Type.Bytes.create());
+ assertThat(Type.Bytes.create()).isNotEqualTo(Type.String.create());
+ assertThat(Type.Int64.create()).isNotEqualTo(Type.String.create());
+ assertThat(Type.Float32.create()).isNotEqualTo(Type.String.create());
+ assertThat(Type.Float64.create()).isNotEqualTo(Type.String.create());
+ assertThat(Type.Bool.create()).isNotEqualTo(Type.String.create());
+ assertThat(Type.Timestamp.create()).isNotEqualTo(Type.String.create());
+ assertThat(Type.Date.create()).isNotEqualTo(Type.String.create());
+ assertThat(Type.SchemalessStruct.create()).isNotEqualTo(Type.String.create());
+ }
+
+ @Test
+ public void array_equals() {
+ assertThat(Type.Array.create(Type.String.create()))
+ .isEqualTo(Type.Array.create(Type.String.create()));
+ assertThat(Type.Array.create(Type.String.create()))
+ .isNotEqualTo(Type.Array.create(Type.Bytes.create()));
+ // Nested arrays
+ assertThat(Type.Array.create(Type.Array.create(Type.String.create())))
+ .isEqualTo(Type.Array.create(Type.Array.create(Type.String.create())));
+ assertThat(Type.Array.create(Type.Array.create(Type.String.create())))
+ .isNotEqualTo(Type.Array.create(Type.Array.create(Type.Bytes.create())));
+ }
+
+ @Test
+ public void map_equals() {
+ assertThat(Type.Map.create(Type.Bytes.create(), Type.String.create()))
+ .isEqualTo(Type.Map.create(Type.Bytes.create(), Type.String.create()));
+ assertThat(Type.Map.create(Type.Bytes.create(), Type.String.create()))
+ .isNotEqualTo(Type.Map.create(Type.String.create(), Type.String.create()));
+ assertThat(Type.Map.create(Type.Bytes.create(), Type.String.create()))
+ .isNotEqualTo(Type.Map.create(Type.Bytes.create(), Type.Bytes.create()));
+ // Nested Maps
+ assertThat(
+ Type.Map.create(
+ Type.Bytes.create(), Type.Map.create(Type.String.create(), Type.Bytes.create())))
+ .isEqualTo(
+ Type.Map.create(
+ Type.Bytes.create(), Type.Map.create(Type.String.create(), Type.Bytes.create())));
+ assertThat(
+ Type.Map.create(
+ Type.Bytes.create(), Type.Map.create(Type.String.create(), Type.Bytes.create())))
+ .isNotEqualTo(
+ Type.Map.create(
+ Type.Bytes.create(), Type.Map.create(Type.String.create(), Type.String.create())));
+ }
+
+ @Test
+ public void structWithSchema_equals() {
+ com.google.bigtable.v2.Type structProto =
+ structType(structField("timestamp", timestampType()), structField("value", bytesType()));
+ com.google.bigtable.v2.Type complexStructProto =
+ structType(
+ structField("map", mapType(stringType(), bytesType())),
+ structField("array", arrayType(stringType())));
+ new EqualsTester()
+ .addEqualityGroup(
+ StructWithSchema.fromProto(structProto.getStructType()),
+ StructWithSchema.fromProto(structProto.getStructType()))
+ .addEqualityGroup(
+ StructWithSchema.fromProto(complexStructProto.getStructType()),
+ StructWithSchema.fromProto(complexStructProto.getStructType()));
+ }
+
+ @Test
+ public void structWithSchema_fields() {
+ StructWithSchema struct =
+ StructWithSchema.fromProto(
+ structType(structField("timestamp", timestampType()), structField("value", bytesType()))
+ .getStructType());
+ assertThat(struct.getFields()).hasSize(2);
+ assertThat(struct.getFields().get(0).name()).isEqualTo("timestamp");
+ assertThat(struct.getFields().get(0).type()).isEqualTo(Type.Timestamp.create());
+ assertThat(struct.getType(0)).isEqualTo(Type.Timestamp.create());
+ assertThat(struct.getType("timestamp")).isEqualTo(Type.Timestamp.create());
+ assertThat(struct.getColumnIndex("timestamp")).isEqualTo(0);
+
+ assertThat(struct.getFields().get(1).name()).isEqualTo("value");
+ assertThat(struct.getFields().get(1).type()).isEqualTo(Type.Bytes.create());
+ assertThat(struct.getType(1)).isEqualTo(Type.Bytes.create());
+ assertThat(struct.getType("value")).isEqualTo(Type.Bytes.create());
+ assertThat(struct.getColumnIndex("value")).isEqualTo(1);
+ }
+
+ @Test
+ public void structWithSchema_handlesAmbiguousFields() {
+ StructWithSchema struct =
+ StructWithSchema.fromProto(
+ structType(structField("foo", timestampType()), structField("foo", bytesType()))
+ .getStructType());
+ assertThat(struct.getFields()).hasSize(2);
+ assertThat(struct.getType(0)).isEqualTo(Type.Timestamp.create());
+ assertThat(struct.getType(1)).isEqualTo(Type.Bytes.create());
+
+ assertThrows(IllegalArgumentException.class, () -> struct.getType("foo"));
+ assertThrows(IllegalArgumentException.class, () -> struct.getColumnIndex("foo"));
+ }
+
+ @Test
+ public void structWithSchema_toString() {
+ StructWithSchema struct =
+ StructWithSchema.fromProto(
+ structType(structField("test", stringType()), structField("test2", int64Type()))
+ .getStructType());
+ assertThat(struct.toString())
+ .isEqualTo("STRUCT{fields=[Field{name=test, type=STRING}, Field{name=test2, type=INT64}]}");
+ }
+
+ @Test
+ public void schemalessStruct_throwsExceptionOnSchemaAccess() {
+ SchemalessStruct struct = Type.SchemalessStruct.create();
+
+ assertThrows(UnsupportedOperationException.class, () -> struct.getType("foo"));
+ assertThrows(UnsupportedOperationException.class, () -> struct.getType(0));
+ assertThrows(UnsupportedOperationException.class, () -> struct.getColumnIndex("foo"));
+ assertThrows(UnsupportedOperationException.class, struct::getFields);
+ }
+
+ @Test
+ public void array_toString() {
+ Type array = Type.Array.create(Type.String.create());
+
+ assertThat(array.toString()).isEqualTo("ARRAY{elementType=STRING}");
+ }
+
+ @Test
+ public void simpleMap_toString() {
+ Type map = Type.Map.create(Type.Bytes.create(), Type.String.create());
+
+ assertThat(map.toString()).isEqualTo("MAP{keyType=BYTES, valueType=STRING}");
+ }
+
+ @Test
+ public void historicalMap_toString() {
+ SqlType.Map<ByteString, List<Struct>> historicalMap = SqlType.historicalMap();
+
+ assertThat(historicalMap.toString())
+ .isEqualTo("MAP{keyType=BYTES, valueType=ARRAY{elementType=STRUCT}}");
+ }
+}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/AbstractProtoStructReaderTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/AbstractProtoStructReaderTest.java
new file mode 100644
index 0000000000..68c88f775e
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/AbstractProtoStructReaderTest.java
@@ -0,0 +1,677 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.internal;
+
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.arrayType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.arrayValue;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.boolType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.boolValue;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.bytesType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.bytesValue;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.columnMetadata;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.dateType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.dateValue;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.float32Type;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.float64Type;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.floatValue;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.int64Type;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.int64Value;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.mapElement;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.mapType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.mapValue;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.metadata;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.nullValue;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.stringType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.stringValue;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.structField;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.structType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.structValue;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.timestampType;
+import static com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory.timestampValue;
+import static com.google.common.truth.Truth.assertThat;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+import com.google.auto.value.AutoValue;
+import com.google.bigtable.v2.ColumnMetadata;
+import com.google.bigtable.v2.Type;
+import com.google.bigtable.v2.Type.KindCase;
+import com.google.bigtable.v2.Value;
+import com.google.cloud.Date;
+import com.google.cloud.bigtable.data.v2.models.sql.ResultSetMetadata;
+import com.google.cloud.bigtable.data.v2.models.sql.SqlType;
+import com.google.cloud.bigtable.data.v2.models.sql.Struct;
+import com.google.cloud.bigtable.data.v2.stub.sql.SqlProtoFactory;
+import com.google.protobuf.ByteString;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.BiFunction;
+import java.util.stream.Collectors;
+import org.junit.Test;
+import org.junit.experimental.runners.Enclosed;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.threeten.bp.Instant;
+
+@RunWith(Enclosed.class)
+public class AbstractProtoStructReaderTest {
+
+ // Timestamp can be in micros up to max long
+ private static final long MAX_TS_SECONDS = Long.MAX_VALUE / 1000 / 1000;
+
+  @AutoValue
+  public abstract static class TestProtoStruct extends AbstractProtoStructReader {
+    // Minimal concrete reader: delegates schema lookups to the provided metadata.
+    public static TestProtoStruct create(ResultSetMetadata metadata, List<Value> values) {
+      return new AutoValue_AbstractProtoStructReaderTest_TestProtoStruct(values, metadata);
+    }
+
+    abstract ResultSetMetadata metadata();
+
+    @Override
+    public int getColumnIndex(String columnName) {
+      return metadata().getColumnIndex(columnName);
+    }
+
+    @Override
+    public SqlType<?> getColumnType(int columnIndex) {
+      return metadata().getColumnType(columnIndex);
+    }
+  }
+
+ // New tests should always be added to types test
+ // Specific tests we don't want to re-run for each type go here
+  @RunWith(JUnit4.class)
+  public static class OneOffTests {
+    @Test
+    public void simpleMapField_validatesType() {
+      TestProtoStruct structWithMap =
+          TestProtoStruct.create(
+              ProtoResultSetMetadata.fromProto(
+                  metadata(columnMetadata("testField", mapType(bytesType(), stringType())))
+                      .getMetadata()),
+              Collections.singletonList(
+                  mapValue(
+                      mapElement(bytesValue("foo"), stringValue("bar")),
+                      mapElement(bytesValue("key"), stringValue("val")))));
+      HashMap<ByteString, String> expectedMap = new HashMap<>();
+      expectedMap.put(ByteString.copyFromUtf8("foo"), "bar");
+      expectedMap.put(ByteString.copyFromUtf8("key"), "val");
+
+      assertThat(
+              structWithMap.getMap("testField", SqlType.mapOf(SqlType.bytes(), SqlType.string())))
+          .isEqualTo(expectedMap);
+      assertThat(structWithMap.getMap(0, SqlType.mapOf(SqlType.bytes(), SqlType.string())))
+          .isEqualTo(expectedMap);
+
+      assertThrows(
+          IllegalStateException.class,
+          () -> structWithMap.getMap("testField", SqlType.mapOf(SqlType.bytes(), SqlType.bytes())));
+      assertThrows( // NOTE(review): duplicate of the assertion above — likely meant a different mismatched type
+          IllegalStateException.class,
+          () -> structWithMap.getMap("testField", SqlType.mapOf(SqlType.bytes(), SqlType.bytes())));
+      assertThrows(
+          IllegalStateException.class,
+          () -> structWithMap.getMap(0, SqlType.mapOf(SqlType.bytes(), SqlType.bytes())));
+      assertThrows( // NOTE(review): duplicate of the assertion above — likely meant a different mismatched type
+          IllegalStateException.class,
+          () -> structWithMap.getMap(0, SqlType.mapOf(SqlType.bytes(), SqlType.bytes())));
+    }
+
+    @Test
+    public void nestedMapField_validatesType() {
+      TestProtoStruct historicalMap =
+          TestProtoStruct.create(
+              ProtoResultSetMetadata.fromProto(
+                  metadata(
+                          columnMetadata(
+                              "testField",
+                              mapType(
+                                  bytesType(),
+                                  arrayType(
+                                      structType(
+                                          structField("timestamp", timestampType()),
+                                          structField("value", bytesType()))))))
+                      .getMetadata()),
+              Collections.singletonList(
+                  mapValue(
+                      mapElement(
+                          bytesValue("qual"),
+                          arrayValue(
+                              structValue(timestampValue(10000, 100), bytesValue("test1")),
+                              structValue(timestampValue(20000, 100), bytesValue("test2")))))));
+
+      HashMap<ByteString, List<Struct>> expectedMap = new HashMap<>();
+      expectedMap.put(
+          ByteString.copyFromUtf8("qual"),
+          Arrays.asList(
+              ProtoStruct.create(
+                  (SqlType.Struct)
+                      SqlType.fromProto(
+                          structType(
+                              structField("timestamp", timestampType()),
+                              structField("value", bytesType()))),
+                  arrayValue(timestampValue(10000, 100), bytesValue("test1")).getArrayValue()),
+              ProtoStruct.create(
+                  (SqlType.Struct)
+                      SqlType.fromProto(
+                          structType(
+                              structField("timestamp", timestampType()),
+                              structField("value", bytesType()))),
+                  arrayValue(timestampValue(20000, 100), bytesValue("test2")).getArrayValue())));
+
+      assertThat(historicalMap.getMap("testField", SqlType.historicalMap())).isEqualTo(expectedMap);
+      assertThat(historicalMap.getMap(0, SqlType.historicalMap())).isEqualTo(expectedMap);
+
+      assertThrows(
+          IllegalStateException.class,
+          () -> historicalMap.getMap("testField", SqlType.mapOf(SqlType.bytes(), SqlType.bytes())));
+      assertThrows(
+          IllegalStateException.class,
+          () ->
+              historicalMap.getMap(
+                  "testField", SqlType.mapOf(SqlType.bytes(), SqlType.arrayOf(SqlType.string()))));
+      assertThrows(
+          IllegalStateException.class,
+          () -> historicalMap.getMap(0, SqlType.mapOf(SqlType.bytes(), SqlType.bytes())));
+      assertThrows(
+          IllegalStateException.class,
+          () ->
+              historicalMap.getMap(
+                  0, SqlType.mapOf(SqlType.bytes(), SqlType.arrayOf(SqlType.string()))));
+    }
+
+    @Test
+    public void arrayField_validatesType() {
+      TestProtoStruct structWithList =
+          TestProtoStruct.create(
+              ProtoResultSetMetadata.fromProto(
+                  metadata(columnMetadata("testField", arrayType(stringType()))).getMetadata()),
+              Collections.singletonList(arrayValue(stringValue("foo"), stringValue("bar"))));
+      List<String> expectedList = Arrays.asList("foo", "bar");
+
+      assertThat(structWithList.getList("testField", SqlType.arrayOf(SqlType.string())))
+          .isEqualTo(expectedList);
+      assertThat(structWithList.getList(0, SqlType.arrayOf(SqlType.string())))
+          .isEqualTo(expectedList);
+
+      assertThrows(
+          IllegalStateException.class,
+          () -> structWithList.getList("testField", SqlType.arrayOf(SqlType.bytes())));
+      assertThrows(
+          IllegalStateException.class,
+          () -> structWithList.getList(0, SqlType.arrayOf(SqlType.bytes())));
+    }
+  }
+
+ @RunWith(Parameterized.class)
+ public static class TypesTest {
+ @Parameterized.Parameters()
+ public static List parameters() {
+ return Arrays.asList(
+ new Object[][] {
+ // Bytes
+ {
+ Collections.singletonList(columnMetadata("testField", bytesType())),
+ Collections.singletonList(bytesValue("test")),
+ 0,
+ "testField",
+ (BiFunction) TestProtoStruct::getBytes,
+ (BiFunction) TestProtoStruct::getBytes,
+ ByteString.copyFromUtf8("test")
+ },
+ // String
+ {
+ Collections.singletonList(columnMetadata("testField", stringType())),
+ Collections.singletonList(stringValue("test")),
+ 0,
+ "testField",
+ (BiFunction) TestProtoStruct::getString,
+ (BiFunction) TestProtoStruct::getString,
+ "test"
+ },
+ // Long
+ {
+ Collections.singletonList(columnMetadata("testField", int64Type())),
+ Collections.singletonList(int64Value(110L)),
+ 0,
+ "testField",
+ (BiFunction) TestProtoStruct::getLong,
+ (BiFunction) TestProtoStruct::getLong,
+ 110L
+ },
+ // Double
+ {
+ Collections.singletonList(columnMetadata("testField", float64Type())),
+ Collections.singletonList(floatValue(100.3d)),
+ 0,
+ "testField",
+ (BiFunction) TestProtoStruct::getDouble,
+ (BiFunction) TestProtoStruct::getDouble,
+ 100.3d
+ },
+ // Float
+ {
+ Collections.singletonList(columnMetadata("testField", float32Type())),
+ Collections.singletonList(floatValue(100.3f)),
+ 0,
+ "testField",
+ (BiFunction) TestProtoStruct::getFloat,
+ (BiFunction) TestProtoStruct::getFloat,
+ 100.3f
+ },
+ // Boolean
+ {
+ Collections.singletonList(columnMetadata("testField", boolType())),
+ Collections.singletonList(boolValue(true)),
+ 0,
+ "testField",
+ (BiFunction) TestProtoStruct::getBoolean,
+ (BiFunction) TestProtoStruct::getBoolean,
+ true
+ },
+ // Timestamp
+ {
+ Collections.singletonList(columnMetadata("testField", timestampType())),
+ Collections.singletonList(timestampValue(1000000, 100)),
+ 0,
+ "testField",
+ (BiFunction