diff --git a/.changelog/4155.txt b/.changelog/4155.txt
new file mode 100644
index 00000000000..eb70eb6424f
--- /dev/null
+++ b/.changelog/4155.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+bigquery: added support for the `BIGTABLE` source format in `google_bigquery_table`
+```
diff --git a/google/resource_big_query_job.go b/google/resource_big_query_job.go
index 9b977839d5c..2fbc41872b7 100644
--- a/google/resource_big_query_job.go
+++ b/google/resource_big_query_job.go
@@ -516,7 +516,8 @@ row N is just skipped. Otherwise row N is used to extract column names for the d
                 ForceNew:    true,
                 Description: `The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP".
 For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET".
-For orc, specify "ORC". The default value is CSV.`,
+For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE".
+The default value is CSV.`,
                 Default:     "CSV",
             },
             "time_partitioning": {
diff --git a/google/resource_bigquery_table.go b/google/resource_bigquery_table.go
index c9ea365ab21..4927593005c 100644
--- a/google/resource_bigquery_table.go
+++ b/google/resource_bigquery_table.go
@@ -389,7 +389,7 @@ func resourceBigQueryTable() *schema.Resource {
                 Required:    true,
                 Description: `The data format. Supported values are: "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "PARQUET", "ORC" and "DATASTORE_BACKUP". To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`,
                 ValidateFunc: validation.StringInSlice([]string{
-                    "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "DATASTORE_BACKUP", "PARQUET", "ORC",
+                    "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE",
                 }, false),
             },
             // SourceURIs [Required] The fully-qualified URIs that point to your data in Google Cloud.
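With `"BIGTABLE"` accepted by the validator above, an external BigQuery table can be backed by a Cloud Bigtable table. The sketch below is condensed from the acceptance-test configuration added later in this diff; the `google_bigquery_dataset` and `google_bigtable_table` resources it references are assumed to be defined elsewhere, and the table ID is a placeholder rather than a documented example.

```hcl
resource "google_bigquery_table" "table" {
  dataset_id = google_bigquery_dataset.dataset.dataset_id
  table_id   = "bigtable_backed_table"

  external_data_configuration {
    autodetect            = true
    source_format         = "BIGTABLE"
    ignore_unknown_values = true

    source_uris = [
      # Fully-qualified Bigtable table URI; google_bigtable_table's id attribute
      # already has the projects/{project}/instances/{instance}/tables/{table} form.
      "https://googleapis.com/bigtable/${google_bigtable_table.table.id}",
    ]
  }
}
```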
diff --git a/google/resource_bigquery_table_test.go b/google/resource_bigquery_table_test.go
index 41ff303be75..cc3c93520c4 100644
--- a/google/resource_bigquery_table_test.go
+++ b/google/resource_bigquery_table_test.go
@@ -425,6 +425,32 @@ func TestAccBigQueryExternalDataTable_CSV(t *testing.T) {
     })
 }
 
+func TestAccBigQueryDataTable_bigtable(t *testing.T) {
+    t.Parallel()
+
+    context := map[string]interface{}{
+        "random_suffix": randString(t, 8),
+        "project":       getTestProjectFromEnv(),
+    }
+
+    vcrTest(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t),
+        Steps: []resource.TestStep{
+            {
+                Config: testAccBigQueryTableFromBigtable(context),
+            },
+            {
+                ResourceName:            "google_bigquery_table.table",
+                ImportState:             true,
+                ImportStateVerify:       true,
+                ImportStateVerifyIgnore: []string{"deletion_protection"},
+            },
+        },
+    })
+}
+
 func TestAccBigQueryDataTable_sheet(t *testing.T) {
     t.Parallel()
 
@@ -1406,6 +1432,53 @@ resource "google_bigquery_table" "test" {
 `, datasetID, bucketName, objectName, content, tableID, format, quoteChar)
 }
 
+func testAccBigQueryTableFromBigtable(context map[string]interface{}) string {
+    return Nprintf(`
+  resource "google_bigtable_instance" "instance" {
+    name = "tf-test-bigtable-inst-%{random_suffix}"
+    cluster {
+      cluster_id = "tf-test-bigtable-%{random_suffix}"
+      zone       = "us-central1-b"
+    }
+    instance_type       = "DEVELOPMENT"
+    deletion_protection = false
+  }
+  resource "google_bigtable_table" "table" {
+    name          = "%{random_suffix}"
+    instance_name = google_bigtable_instance.instance.name
+    column_family {
+      family = "cf-%{random_suffix}-first"
+    }
+    column_family {
+      family = "cf-%{random_suffix}-second"
+    }
+  }
+  resource "google_bigquery_table" "table" {
+    deletion_protection = false
+    dataset_id          = google_bigquery_dataset.dataset.dataset_id
+    table_id            = "tf_test_bigtable_%{random_suffix}"
+    external_data_configuration {
+      autodetect            = true
+      source_format         = "BIGTABLE"
+      ignore_unknown_values = true
+      source_uris = [
+        "https://googleapis.com/bigtable/${google_bigtable_table.table.id}",
+      ]
+    }
+  }
+  resource "google_bigquery_dataset" "dataset" {
+    dataset_id                  = "tf_test_ds_%{random_suffix}"
+    friendly_name               = "test"
+    description                 = "This is a test description"
+    location                    = "EU"
+    default_table_expiration_ms = 3600000
+    labels = {
+      env = "default"
+    }
+  }
+`, context)
+}
+
 func testAccBigQueryTableFromSheet(context map[string]interface{}) string {
     return Nprintf(`
 resource "google_bigquery_table" "table" {
diff --git a/website/docs/r/bigquery_job.html.markdown b/website/docs/r/bigquery_job.html.markdown
index 635f8a5d4ed..d2bcf10c673 100644
--- a/website/docs/r/bigquery_job.html.markdown
+++ b/website/docs/r/bigquery_job.html.markdown
@@ -644,7 +644,8 @@ The `load` block supports:
   (Optional)
   The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP".
   For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET".
-  For orc, specify "ORC". The default value is CSV.
+  For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE".
+  The default value is CSV.
 
 * `allow_jagged_rows` -
   (Optional)
diff --git a/website/docs/r/bigquery_table.html.markdown b/website/docs/r/bigquery_table.html.markdown
index 036940cd14b..5c563cc9722 100644
--- a/website/docs/r/bigquery_table.html.markdown
+++ b/website/docs/r/bigquery_table.html.markdown
@@ -186,8 +186,8 @@ The `external_data_configuration` block supports:
     `google_bigquery_table.schema`
 
 * `source_format` (Required) - The data format. Supported values are:
-  "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "PARQUET", "ORC"
-  and "DATASTORE_BACKUP". To use "GOOGLE_SHEETS"
+  "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "PARQUET", "ORC",
+  "DATASTORE_BACKUP", and "BIGTABLE". To use "GOOGLE_SHEETS"
   the `scopes` must include "https://www.googleapis.com/auth/drive.readonly".
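The `google_bigquery_job` change is documentation-only ("[Beta] For Bigtable, specify \"BIGTABLE\"") and this diff adds no job-level test, so the following is a hypothetical sketch rather than a verified configuration. It assumes a load job accepts the same `https://googleapis.com/bigtable/...` source URI form used by the table-level test; the project, instance, and table names are placeholders.

```hcl
resource "google_bigquery_job" "load" {
  job_id = "job_load_from_bigtable"

  load {
    # [Beta] per the updated source_format description above.
    source_format = "BIGTABLE"

    source_uris = [
      # Assumed to mirror the table-level URI format; not exercised by this PR's tests.
      "https://googleapis.com/bigtable/projects/my-project/instances/my-instance/tables/my-table",
    ]

    destination_table {
      project_id = google_bigquery_table.dest.project
      dataset_id = google_bigquery_table.dest.dataset_id
      table_id   = google_bigquery_table.dest.table_id
    }
  }
}
```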